ductai199x committed
Commit 0b94f9c · 1 Parent(s): 6323f9c

put in final fix

Files changed (1):
  video_std_manip.py  +9 -12
video_std_manip.py CHANGED
@@ -21,7 +21,7 @@ _CITATION = """\
 """
 
 _DESCRIPTION = """\
-This dataset is a collection of simple and traditional localized video manipulations, such as: splicing, color correction, contrast enhancement, bluring, and noise addition. The dataset is designed to be used for training and evaluating video manipulation detection models. We used this dataset to train the VideoFACT model, which is a deep learning model that uses attention, scene context, and forensic traces to detect a wide variety of video forgery types, i.e. splicing, editing, deepfake, inpainting. The dataset is divided into three parts: Video Camera Model Splicing (VCMS), Video Perceptually Visible Manipulation (VPVM), and Video Perceptually Invisible Manipulation (VPIM). Each part has a total of 2000 videos, each video is 1 second, or 30 frames, has a resolution of 1920 x 1080, and encoded using FFmpeg with the H.264 codec at CRF 23. Additionally, each part is splited into training, validation, and testing sets that consists of 1600, 100, 300 videos, respectively. More details about the dataset can be found in the paper.
+This dataset is a collection of simple and traditional localized video manipulations, such as: splicing, color correction, contrast enhancement, bluring, and noise addition. The dataset is designed to be used for training and evaluating video manipulation detection models. We used this dataset to train the VideoFACT model, which is a deep learning model that uses attention, scene context, and forensic traces to detect a wide variety of video forgery types, i.e. splicing, editing, deepfake, inpainting. The dataset is divided into three parts: Video Camera Model Splicing (VCMS), Video Perceptually Visible Manipulation (VPVM), and Video Perceptually Invisible Manipulation (VPIM). Each part has a total of 4000 videos, each video is 1 second, or 30 frames, has a resolution of 1920 x 1080, and encoded using FFmpeg with the H.264 codec at CRF 23. Additionally, each part is splited into training, validation, and testing sets that consists of 3200, 200, 600 videos, respectively. More details about the dataset can be found in the paper.
 """
 
 _HOMEPAGE = "https://github.com/ductai199x/videofact-wacv-2024"
@@ -41,7 +41,6 @@ class VideoStdManip(datasets.GeneratorBasedBuilder):
     """This dataset is a collection of simple and traditional localized video manipulations, such as: splicing, color correction, contrast enhancement, bluring, and noise addition. The dataset is divided into three parts: Video Camera Model Splicing (VCMS), Video Perceptually Visible Manipulation (VPVM), and Video Perceptually Invisible Manipulation (VPIM)."""
 
     VERSION = datasets.Version("1.0.0")
-    IN_MEMORY_MAX_SIZE = 1.0e10
 
     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
@@ -65,8 +64,8 @@ class VideoStdManip(datasets.GeneratorBasedBuilder):
     def _info(self):
         features = datasets.Features(
             {
-                "frames": datasets.Array4D(shape=(30, 1080, 1920, 3), dtype="uint8"),
-                "masks": datasets.Array3D(shape=(30, 1080, 1920), dtype="uint8"),
+                "vid_path": datasets.Value("string"),
+                "mask_path": datasets.Value("string"),
                 "label": datasets.ClassLabel(num_classes=2),
                 # These are the features of your dataset like images, labels ...
             }
@@ -137,16 +136,14 @@ class VideoStdManip(datasets.GeneratorBasedBuilder):
         for key, (label, vid_id) in enumerate(all_vid_ids):
             label = 0 if label == "real" else 1
             if label == 1:
-                frames = decord.VideoReader(open(os.path.join(part_dir, "manipulated", vid_id + ".mp4"), "rb"))[:]
-                masks = decord.VideoReader(open(os.path.join(part_dir, "mask", vid_id + ".mp4"), "rb"))[:]
+                vid_path = os.path.join(part_dir, "manipulated", vid_id + ".mp4")
+                mask_path = os.path.join(part_dir, "mask", vid_id + ".mp4")
             else:
-                frames = decord.VideoReader(open(os.path.join(part_dir, "original", vid_id + ".mp4"), "rb"))[:]
-                masks = torch.zeros_like(frames, dtype=torch.uint8)
-
-            masks = ((masks.float().mean(3) / 255.0) > 0.5).to(torch.uint8)
 
             yield key, {
-                "frames": frames,
-                "masks": masks,
+                "vid_path": vid_path,
+                "mask_path": mask_path,
                 "label": label,
             }
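
The net effect of the commit is to stop decoding video inside _generate_examples: the loader now yields vid_path/mask_path strings, and decoding moves to the consumer. Below is a minimal sketch of how a consumer might rebuild the old frames/masks tensors from the new path features, reusing the decord/torch logic removed above; the torch-bridge setup and the decode_example helper are assumptions for illustration, not part of the dataset script.

# Sketch: consumer-side decoding, mirroring the logic this commit removed.
# Assumes decord's torch bridge; decode_example is a hypothetical helper.
import decord
import torch

decord.bridge.set_bridge("torch")  # VideoReader slices now return torch tensors

def decode_example(example):
    # All 30 frames of the 1-second clip: (30, 1080, 1920, 3), uint8.
    frames = decord.VideoReader(example["vid_path"])[:]
    if example["mask_path"]:
        masks = decord.VideoReader(example["mask_path"])[:]
    else:
        # Real videos carry no manipulation mask; substitute all zeros.
        masks = torch.zeros_like(frames)
    # Average over the channel axis and binarize at 0.5, as the old loader did.
    masks = ((masks.float().mean(3) / 255.0) > 0.5).to(torch.uint8)
    return frames, masks

Storing paths instead of decoded arrays keeps each example a few bytes rather than roughly 180 MB of uint8 frame data (30 x 1080 x 1920 x 3), which is presumably also why the IN_MEMORY_MAX_SIZE override was dropped.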