From cc0a55dcfbbf28830e63484b17fdcd815e7936f0 Mon Sep 17 00:00:00 2001 From: Kok Wei <52521979+kwcckw@users.noreply.github.com> Date: Sat, 5 Aug 2023 21:41:16 +0800 Subject: [PATCH] Added new benchmark results and added new noise type in BadPhotoCopy. --- README.md | 82 ++--- augraphy/augmentations/badphotocopy.py | 11 +- augraphy/utilities/noisegenerator.py | 400 +++++++++++++++++-------- benchmark/README.md | 82 ++--- 4 files changed, 361 insertions(+), 214 deletions(-) diff --git a/README.md b/README.md index ff618310..bacb1d70 100644 --- a/README.md +++ b/README.md @@ -59,49 +59,49 @@ The benchmark results are computed with Augraphy 8.20 and Tobacco3482 dataset (r | Augmentation |Images per second|Memory usage (MB)| |--------------------|----------------:|----------------:| -|BadPhotoCopy | 0.38| 178.20| -|BindingsAndFasteners| 29.34| 21.43| -|BleedThrough | 0.39| 684.69| -|BookBinding | 0.08| 611.74| -|Brightness | 5.93| 147.99| -|BrightnessTexturize | 2.12| 181.74| -|ColorPaper | 5.12| 105.66| -|ColorShift | 0.87| 114.26| +|BadPhotoCopy | 0.37| 178.20| +|BindingsAndFasteners| 31.40| 21.43| +|BleedThrough | 0.43| 684.69| +|BookBinding | 0.11| 611.74| +|Brightness | 5.75| 147.99| +|BrightnessTexturize | 2.16| 181.74| +|ColorPaper | 5.07| 105.66| +|ColorShift | 0.88| 114.26| |DelaunayTessellation| 0.14| 60.00| -|DirtyDrum | 0.82| 482.63| -|DirtyRollers | 1.58| 249.52| -|Dithering | 3.96| 126.82| -|DotMatrix | 0.22| 80.53| -|Faxify | 2.02| 126.75| -|Folding | 2.85| 63.27| -|Gamma | 34.27| 25.36| -|Geometric | 189.37| 12.68| -|GlitchEffect | 1.48| 126.94| +|DirtyDrum | 0.96| 482.63| +|DirtyRollers | 1.63| 249.52| +|Dithering | 3.82| 126.82| +|DotMatrix | 0.64| 80.77| +|Faxify | 2.12| 126.74| +|Folding | 2.62| 63.28| +|Gamma | 34.39| 25.36| +|Geometric | 183.89| 12.68| +|GlitchEffect | 1.49| 126.94| |Hollow | 0.21| 343.17| -|InkBleed | 3.93| 177.51| -|InkColorSwap | 5.22| 51.99| -|InkMottling | 7.78| 55.99| -|InkShifter | 0.17| 426.89| -|Jpeg | 6.32| 25.85| -|Letterpress | 0.45| 158.10| -|LightingGradient | 0.44| 638.31| -|LinesDegradation | 1.69| 175.38| -|LowInkPeriodicLines | 6.47| 12.74| -|LowInkRandomLines | 114.02| 12.74| -|LowLightNoise | 0.30| 481.95| -|Markup | 2.97| 154.12| -|NoiseTexturize | 0.73| 249.36| -|NoisyLines | 0.86| 446.76| -|PageBorder | 0.32| 201.22| -|PatternGenerator | 1.68| 51.52| -|ReflectedLight | 0.08| 109.97| -|Scribbles | 1.00| 97.13| -|SectionShift | 129.82| 12.96| -|ShadowCast | 0.95| 50.79| -|Squish | 0.26| 465.31| -|SubtleNoise | 1.68| 202.87| -|VoronoiTessellation | 0.08| 57.48| -|WaterMark | 2.70| 352.34| +|InkBleed | 4.00| 177.51| +|InkColorSwap | 5.10| 51.99| +|InkMottling | 7.60| 55.99| +|InkShifter | 0.20| 426.89| +|Jpeg | 6.38| 25.85| +|Letterpress | 0.46| 158.10| +|LightingGradient | 0.47| 638.31| +|LinesDegradation | 1.51| 175.38| +|LowInkPeriodicLines | 6.19| 12.74| +|LowInkRandomLines | 144.08| 12.74| +|LowLightNoise | 0.32| 481.95| +|Markup | 2.54| 154.12| +|NoiseTexturize | 0.96| 249.36| +|NoisyLines | 1.04| 446.89| +|PageBorder | 0.56| 191.84| +|PatternGenerator | 1.00| 51.53| +|ReflectedLight | 0.08| 109.92| +|Scribbles | 0.93| 99.10| +|SectionShift | 154.45| 12.95| +|ShadowCast | 0.86| 50.80| +|Squish | 0.99| 443.70| +|SubtleNoise | 1.82| 202.87| +|VoronoiTessellation | 0.09| 58.18| +|WaterMark | 2.55| 390.55| # Alternative Augmentation Libraries There are plenty of choices when it comes to [augmentation libraries](https://github.com/AgaMiko/data-augmentation-review). 
However, only Augraphy is designed to address everyday office automation needs associated with paper-oriented process distortions that come from printing, faxing, scanning and copy machines. Most other libraries focus on video and images pertinent to camera-oriented data sources and problem domains. Augraphy is focused on supporting problems related to automation of document images such as OCR, form recognition, form data extraction, document classification, barcode decoding, denoising, document restoration, identity document data extraction, document cropping, etc. Eventually, Augraphy will be able to support photo OCR problems with augmentations designed to emulate camera phone distortions. diff --git a/augraphy/augmentations/badphotocopy.py b/augraphy/augmentations/badphotocopy.py index 22a0d6e8..ef7ec8d2 100644 --- a/augraphy/augmentations/badphotocopy.py +++ b/augraphy/augmentations/badphotocopy.py @@ -214,7 +214,7 @@ def apply_augmentation(self, image): # get image dimensions ysize, xsize = image.shape[:2] - if self.noise_side == "random" or self.noise_side not in ["left", "top", "right", "bottom"]: + if self.noise_side == "random": noise_side = random.choice(["left", "top", "right", "bottom"]) else: noise_side = self.noise_side @@ -238,19 +238,16 @@ def apply_augmentation(self, image): noise_size=self.noise_size, noise_sparsity=self.noise_sparsity, noise_concentration=self.noise_concentration, + xsize=xsize, + ysize=ysize, ) - # new size after rotation - mask_ysize, mask_xsize = mask.shape[:2] - # rescale to 0 -255 mask = ((mask - np.min(mask)) / (np.max(mask) - np.min(mask))) * 255 if self.noise_value[0] > self.noise_value[1]: self.noise_value[0] = self.noise_value[1] - # creates random small dot of noises - mask += random.randint(self.noise_value[0], self.noise_value[1]) - mask[mask > 255] = 255 + # resize back to original size mask = cv2.resize(mask, (xsize, ysize)).astype("uint8") # apply blur to mask of noise diff --git a/augraphy/utilities/noisegenerator.py b/augraphy/utilities/noisegenerator.py index 677bb813..2d826519 100644 --- a/augraphy/utilities/noisegenerator.py +++ b/augraphy/utilities/noisegenerator.py @@ -13,6 +13,7 @@ class NoiseGenerator: 2 = noise with regular pattern 3 = noise at all borders of image 4 = sparse and little noise + 5 = gaussian noise :type noise_type: int, optional :param noise_side: Location of generated noise. Choose from: "left", "right", "top", "bottom","top_left", "top_right", "bottom_left", "bottom_right". 
@@ -270,153 +271,302 @@ def generate_mask_main( :type ysize: int """ - # get max of y or x size - max_size = max(xsize, ysize) + if noise_type not in [1, 2, 3, 4]: - # generate number of clusters and number of samples in each cluster - n_samples_array = self.generate_clusters_and_samples( - noise_type, - noise_concentration, - max_size, - ) + if noise_type == 5: - # For sparsity of the noises (distance to centroid of cluster) - std, center_x, center_y = self.generate_sparsity_std( - noise_type, - noise_side, - noise_sparsity, - xsize, - ysize, - max_size, - ) + img_mask = np.full((ysize, xsize), fill_value=255, dtype="float") - if noise_type == 2: + iterations = random.randint(3, 8) + for _ in range(iterations): + # random parameters for gaussian noise + mean = random.randint(0, 255) + sigma = random.randint(0, 255) + ratio = random.randint(5, 10) - # reduce sparsity - std = int(std / 5) + # generate random gaussian noise + img_gaussian = np.random.normal(mean, sigma, (int(xsize / ratio), int(ysize / ratio))) + img_gaussian = cv2.resize(img_gaussian, (xsize, ysize), interpolation=cv2.INTER_LINEAR) - # size of noise depends on noise sparsity - random_sparsity = np.random.uniform(noise_sparsity[0], noise_sparsity[1]) - end_y = max((random_sparsity) * ysize, int(ysize / 10)) + # add gaussian noise + img_mask += img_gaussian - # randomize noise pattern - n_step_x = int(xsize / random.randint(10, 14)) - n_step_y = int(ysize / random.randint(16, 20)) + # change image to uint8 after the summation + img_mask = img_mask.astype("uint8") - # initialize points array - generated_points_x = np.array([[-1]], dtype="int") - generated_points_y = np.array([[-1]], dtype="int") + elif noise_type == 6: + # future development: More noise type + pass - # initial noise location - ccenter_y = (0, 0) - ccenter_x = (0, 0) + # threshold to increase or decrease the noise concentration + noise_threshold = int(random.uniform(noise_concentration[0], noise_concentration[1]) * 255) - while ccenter_y[1] < end_y: + # mask of background + img_background = np.random.randint( + noise_background[0], + noise_background[1] + 1, + size=(ysize, xsize), + dtype="uint8", + ) + indices_background = img_mask > noise_threshold + + # temporary assignment + min_value = np.min(img_mask) + img_mask[indices_background] = min_value + + # scale value to provided input range + current_min = np.min(img_mask) + current_max = np.max(img_mask) + img_mask = ( + ((img_mask.astype("float") - current_min) / (current_max - current_min)) + * (noise_value[1] - noise_value[0]) + + noise_value[0] + ).astype("uint8") + + # update background value + img_mask[indices_background] = img_background[indices_background] + + # generate sparsity + sparsity = random.uniform(noise_sparsity[0], noise_sparsity[1]) + + # reduce noise area based on sparsity value + img_sparsity_value = np.arange(-0.3, 1, 1 / int(ysize * sparsity), dtype="float") + length = len(img_sparsity_value) + img_sparsity_value = img_sparsity_value.reshape(length, 1).repeat(xsize, 1) + img_sparsity_value[img_sparsity_value < 0] = 0 + img_sparsity_value = cv2.resize( + img_sparsity_value, + (xsize, int(ysize * sparsity)), + interpolation=cv2.INTER_LINEAR, + ) + img_sparsity_value *= 255 + img_sparsity_mask = np.full((ysize, xsize), fill_value=255, dtype="float") - # reduce sample to generate gradient in noise - samples_index = np.ceil(len(n_samples_array) / 2).astype("int") - n_samples_array = n_samples_array[:samples_index] + # map noise image back to mask based on noise side + if noise_side == "top": 
+ img_sparsity_mask[: int(ysize * sparsity), :] = img_sparsity_value + elif noise_side == "left": + img_sparsity_value = cv2.resize( + np.rot90(img_sparsity_value, 1), + (int(xsize * sparsity), ysize), + interpolation=cv2.INTER_LINEAR, + ) + img_sparsity_mask[:, : int(xsize * sparsity)] = img_sparsity_value + elif noise_side == "right": + img_sparsity_value = cv2.resize( + np.rot90(img_sparsity_value, 3), + (int(xsize * sparsity), ysize), + interpolation=cv2.INTER_LINEAR, + ) + img_sparsity_mask[:, -int(xsize * sparsity) :] = img_sparsity_value + elif noise_side == "bottom": + img_sparsity_value = np.flipud(img_sparsity_value) + img_sparsity_mask[-int(ysize * sparsity) :, :] = img_sparsity_value + elif noise_side == "top_left": + cysize, cxsize = img_sparsity_value.shape[:2] + img_sparsity_value_rot = cv2.resize( + np.rot90(img_sparsity_value, 3), + (cxsize, cysize), + interpolation=cv2.INTER_LINEAR, + ) + img_sparsity_value = img_sparsity_value * img_sparsity_value_rot + img_sparsity_value = (img_sparsity_value - np.min(img_sparsity_value)) / ( + np.max(img_sparsity_value) - np.min(img_sparsity_value) + ) + img_sparsity_value = 255 - (img_sparsity_value * 255) + img_sparsity_mask[: int(ysize * sparsity), :] = np.flipud(img_sparsity_value) + elif noise_side == "top_right": + cysize, cxsize = img_sparsity_value.shape[:2] + img_sparsity_value_rot = cv2.resize( + np.rot90(img_sparsity_value, 1), + (cxsize, cysize), + interpolation=cv2.INTER_LINEAR, + ) + img_sparsity_value = img_sparsity_value * img_sparsity_value_rot + img_sparsity_value = (img_sparsity_value - np.min(img_sparsity_value)) / ( + np.max(img_sparsity_value) - np.min(img_sparsity_value) + ) + img_sparsity_value = 255 - (img_sparsity_value * 255) + img_sparsity_mask[: int(ysize * sparsity), :] = np.flipud(img_sparsity_value) + elif noise_side == "bottom_left": + cysize, cxsize = img_sparsity_value.shape[:2] + img_sparsity_value_rot = cv2.resize( + np.rot90(img_sparsity_value, 3), + (cxsize, cysize), + interpolation=cv2.INTER_LINEAR, + ) + img_sparsity_value = img_sparsity_value * img_sparsity_value_rot + img_sparsity_value = (img_sparsity_value - np.min(img_sparsity_value)) / ( + np.max(img_sparsity_value) - np.min(img_sparsity_value) + ) + img_sparsity_value = 255 - (img_sparsity_value * 255) + img_sparsity_mask[-int(ysize * sparsity) :, :] = img_sparsity_value + elif noise_side == "bottom_right": + cysize, cxsize = img_sparsity_value.shape[:2] + img_sparsity_value_rot = cv2.resize( + np.rot90(img_sparsity_value, 1), + (cxsize, cysize), + interpolation=cv2.INTER_LINEAR, + ) + img_sparsity_value = img_sparsity_value * img_sparsity_value_rot + img_sparsity_value = (img_sparsity_value - np.min(img_sparsity_value)) / ( + np.max(img_sparsity_value) - np.min(img_sparsity_value) + ) + img_sparsity_value = 255 - (img_sparsity_value * 255) + img_sparsity_mask[-int(ysize * sparsity) :, :] = img_sparsity_value - # varying y - ccenter_y = (ccenter_y[1], ccenter_y[1] + n_step_y) - ccenter_x = (0, 0) + # multiply noise with sparsity mask + img_mask = 255 - cv2.multiply(255 - img_mask, 255 - img_sparsity_mask.astype("uint8"), scale=1 / 255) - check_break = 0 - while True: - # varying x - ccenter_x = (ccenter_x[0], ccenter_x[1] + n_step_x) - - # generate coordinates for clusters of blobs - cgenerated_points_x, cgenerated_points_y = self.generate_points( - n_samples_array, - int(std / 5), - ccenter_x, - ccenter_y, - xsize, - ysize, - ) + # noise type 1, 2, 3, 4 - generate noise with scikit-learn's make_blobs + else: + # get max of y or x size + 
max_size = max(xsize, ysize) - # combine coordinates - generated_points_x = np.concatenate( - [generated_points_x, cgenerated_points_x], - ) - generated_points_y = np.concatenate( - [generated_points_y, cgenerated_points_y], - ) + # generate number of clusters and number of samples in each cluster + n_samples_array = self.generate_clusters_and_samples( + noise_type, + noise_concentration, + max_size, + ) - # space between next noise patch - add_space = random.randint(10, 20) - ccenter_x = ( - ccenter_x[0] + n_step_x + add_space, - ccenter_x[1] + n_step_x + add_space, - ) + # For sparsity of the noises (distance to centroid of cluster) + std, center_x, center_y = self.generate_sparsity_std( + noise_type, + noise_side, + noise_sparsity, + xsize, + ysize, + max_size, + ) - # to break out from inner loop - if check_break: - break - elif ccenter_x[1] > xsize: - ccenter_x = (xsize - 1, xsize - 1) - check_break = 1 + if noise_type == 2: - # space between next noise patch - add_space = random.randint(5, 15) + # reduce sparsity + std = int(std / 5) - ccenter_y = (ccenter_y[1] + add_space, ccenter_y[1] + add_space) + # size of noise depends on noise sparsity + random_sparsity = np.random.uniform(noise_sparsity[0], noise_sparsity[1]) + end_y = max((random_sparsity) * ysize, int(ysize / 10)) - # generate mask - img_mask = self.generate_mask( - noise_background, - noise_value, - generated_points_x, - generated_points_y, - xsize, - ysize, - ) + # randomize noise pattern + n_step_x = int(xsize / random.randint(10, 14)) + n_step_y = int(ysize / random.randint(16, 20)) - # rotate mask according to noise_side - if noise_side == "top" or noise_side == "top_left" or noise_side == "top_right": - img_mask = img_mask - elif noise_side == "bottom" or noise_side == "bottom_left" or noise_side == "bottom_right": - img_mask = np.flipud(img_mask) - elif noise_side == "left": - img_mask = np.rot90(img_mask, 1) - elif noise_side == "right": - img_mask = np.rot90(img_mask, 3) - else: - img_mask = np.rot90(img_mask, random.randint(0, 3)) + # initialize points array + generated_points_x = np.array([[-1]], dtype="int") + generated_points_y = np.array([[-1]], dtype="int") - else: - # generate coordinates for clusters of blobs - generated_points_x, generated_points_y = self.generate_points( - n_samples_array, - std, - center_x, - center_y, - xsize, - ysize, - ) + # initial noise location + ccenter_y = (0, 0) + ccenter_x = (0, 0) - # generate mask - img_mask = self.generate_mask( - noise_background, - noise_value, - generated_points_x, - generated_points_y, - xsize, - ysize, - ) + while ccenter_y[1] < end_y: + + # reduce sample to generate gradient in noise + samples_index = np.ceil(len(n_samples_array) / 2).astype("int") + n_samples_array = n_samples_array[:samples_index] + + # varying y + ccenter_y = (ccenter_y[1], ccenter_y[1] + n_step_y) + ccenter_x = (0, 0) + + check_break = 0 + while True: + # varying x + ccenter_x = (ccenter_x[0], ccenter_x[1] + n_step_x) + + # generate coordinates for clusters of blobs + cgenerated_points_x, cgenerated_points_y = self.generate_points( + n_samples_array, + int(std / 5), + ccenter_x, + ccenter_y, + xsize, + ysize, + ) + + # combine coordinates + generated_points_x = np.concatenate( + [generated_points_x, cgenerated_points_x], + ) + generated_points_y = np.concatenate( + [generated_points_y, cgenerated_points_y], + ) + + # space between next noise patch + add_space = random.randint(10, 20) + ccenter_x = ( + ccenter_x[0] + n_step_x + add_space, + ccenter_x[1] + n_step_x + add_space, + ) + 
+ # to break out from inner loop + if check_break: + break + elif ccenter_x[1] > xsize: + ccenter_x = (xsize - 1, xsize - 1) + check_break = 1 - # rotate and merge mask into 4 sides - if noise_type == 3: - img_mask = np.minimum( - img_mask, - cv2.resize(np.rot90(img_mask), (xsize, ysize), interpolation=cv2.INTER_AREA), + # space between next noise patch + add_space = random.randint(5, 15) + + ccenter_y = (ccenter_y[1] + add_space, ccenter_y[1] + add_space) + + # generate mask + img_mask = self.generate_mask( + noise_background, + noise_value, + generated_points_x, + generated_points_y, + xsize, + ysize, ) - img_mask = np.minimum( - img_mask, - cv2.resize(np.rot90(img_mask, k=2), (xsize, ysize), interpolation=cv2.INTER_AREA), + + # rotate mask according to noise_side + if noise_side == "top" or noise_side == "top_left" or noise_side == "top_right": + img_mask = img_mask + elif noise_side == "bottom" or noise_side == "bottom_left" or noise_side == "bottom_right": + img_mask = np.flipud(img_mask) + elif noise_side == "left": + img_mask = np.rot90(img_mask, 1) + elif noise_side == "right": + img_mask = np.rot90(img_mask, 3) + else: + img_mask = np.rot90(img_mask, random.randint(0, 3)) + + else: + # generate coordinates for clusters of blobs + generated_points_x, generated_points_y = self.generate_points( + n_samples_array, + std, + center_x, + center_y, + xsize, + ysize, + ) + + # generate mask + img_mask = self.generate_mask( + noise_background, + noise_value, + generated_points_x, + generated_points_y, + xsize, + ysize, ) + # rotate and merge mask into 4 sides + if noise_type == 3: + img_mask = np.minimum( + img_mask, + cv2.resize(np.rot90(img_mask), (xsize, ysize), interpolation=cv2.INTER_AREA), + ) + img_mask = np.minimum( + img_mask, + cv2.resize(np.rot90(img_mask, k=2), (xsize, ysize), interpolation=cv2.INTER_AREA), + ) + return img_mask def generate_noise( @@ -457,11 +607,11 @@ def generate_noise( background_value = random.randint(noise_background[0], noise_background[1]) # initialize blank noise mask - img_mask = np.full((xsize, ysize), fill_value=background_value).astype("int") + img_mask = np.full((ysize, xsize), fill_value=background_value, dtype="int") # any invalid noise type will reset noise type to 0 - if self.noise_type not in [1, 2, 3, 4]: - noise_type = random.randint(1, 4) + if self.noise_type not in [1, 2, 3, 4, 5]: + noise_type = random.randint(1, 5) else: noise_type = self.noise_type diff --git a/benchmark/README.md b/benchmark/README.md index ea06576d..b1d840e1 100644 --- a/benchmark/README.md +++ b/benchmark/README.md @@ -9,46 +9,46 @@ python run_benchmarks.py --folder_path folder_path_with_images | Augmentation |Images per second|Memory usage (MB)| |--------------------|----------------:|----------------:| -|BadPhotoCopy | 0.38| 178.20| -|BindingsAndFasteners| 29.34| 21.43| -|BleedThrough | 0.39| 684.69| -|BookBinding | 0.08| 611.74| -|Brightness | 5.93| 147.99| -|BrightnessTexturize | 2.12| 181.74| -|ColorPaper | 5.12| 105.66| -|ColorShift | 0.87| 114.26| +|BadPhotoCopy | 0.37| 178.20| +|BindingsAndFasteners| 31.40| 21.43| +|BleedThrough | 0.43| 684.69| +|BookBinding | 0.11| 611.74| +|Brightness | 5.75| 147.99| +|BrightnessTexturize | 2.16| 181.74| +|ColorPaper | 5.07| 105.66| +|ColorShift | 0.88| 114.26| |DelaunayTessellation| 0.14| 60.00| -|DirtyDrum | 0.82| 482.63| -|DirtyRollers | 1.58| 249.52| -|Dithering | 3.96| 126.82| -|DotMatrix | 0.22| 80.53| -|Faxify | 2.02| 126.75| -|Folding | 2.85| 63.27| -|Gamma | 34.27| 25.36| -|Geometric | 189.37| 12.68| -|GlitchEffect 
| 1.48| 126.94| +|DirtyDrum | 0.96| 482.63| +|DirtyRollers | 1.63| 249.52| +|Dithering | 3.82| 126.82| +|DotMatrix | 0.64| 80.77| +|Faxify | 2.12| 126.74| +|Folding | 2.62| 63.28| +|Gamma | 34.39| 25.36| +|Geometric | 183.89| 12.68| +|GlitchEffect | 1.49| 126.94| |Hollow | 0.21| 343.17| -|InkBleed | 3.93| 177.51| -|InkColorSwap | 5.22| 51.99| -|InkMottling | 7.78| 55.99| -|InkShifter | 0.17| 426.89| -|Jpeg | 6.32| 25.85| -|Letterpress | 0.45| 158.10| -|LightingGradient | 0.44| 638.31| -|LinesDegradation | 1.69| 175.38| -|LowInkPeriodicLines | 6.47| 12.74| -|LowInkRandomLines | 114.02| 12.74| -|LowLightNoise | 0.30| 481.95| -|Markup | 2.97| 154.12| -|NoiseTexturize | 0.73| 249.36| -|NoisyLines | 0.86| 446.76| -|PageBorder | 0.32| 201.22| -|PatternGenerator | 1.68| 51.52| -|ReflectedLight | 0.08| 109.97| -|Scribbles | 1.00| 97.13| -|SectionShift | 129.82| 12.96| -|ShadowCast | 0.95| 50.79| -|Squish | 0.26| 465.31| -|SubtleNoise | 1.68| 202.87| -|VoronoiTessellation | 0.08| 57.48| -|WaterMark | 2.70| 352.34| +|InkBleed | 4.00| 177.51| +|InkColorSwap | 5.10| 51.99| +|InkMottling | 7.60| 55.99| +|InkShifter | 0.20| 426.89| +|Jpeg | 6.38| 25.85| +|Letterpress | 0.46| 158.10| +|LightingGradient | 0.47| 638.31| +|LinesDegradation | 1.51| 175.38| +|LowInkPeriodicLines | 6.19| 12.74| +|LowInkRandomLines | 144.08| 12.74| +|LowLightNoise | 0.32| 481.95| +|Markup | 2.54| 154.12| +|NoiseTexturize | 0.96| 249.36| +|NoisyLines | 1.04| 446.89| +|PageBorder | 0.56| 191.84| +|PatternGenerator | 1.00| 51.53| +|ReflectedLight | 0.08| 109.92| +|Scribbles | 0.93| 99.10| +|SectionShift | 154.45| 12.95| +|ShadowCast | 0.86| 50.80| +|Squish | 0.99| 443.70| +|SubtleNoise | 1.82| 202.87| +|VoronoiTessellation | 0.09| 58.18| +|WaterMark | 2.55| 390.55|
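
Below is a minimal usage sketch (not part of the patch) showing how the new Gaussian noise type could be exercised once this change lands. It assumes the top-level `from augraphy import BadPhotoCopy` import that recent releases expose, that BadPhotoCopy keeps forwarding its `noise_type` and `noise_side` arguments to `NoiseGenerator` as shown in the patch, and that the augmentation remains directly callable on a NumPy image; the parameter values are illustrative only.

```python
# Hedged sketch: exercising the new Gaussian noise mask (noise_type=5).
# Assumes BadPhotoCopy forwards noise_type/noise_side to NoiseGenerator
# as in this patch; the input image is a synthetic placeholder.
import numpy as np

from augraphy import BadPhotoCopy

# synthetic near-white "document" page (height x width x channels)
image = np.full((1000, 800, 3), fill_value=250, dtype="uint8")

badphotocopy = BadPhotoCopy(
    noise_type=5,                    # new Gaussian noise mask added in this patch
    noise_side="left",               # side-dependent sparsity mask confines the heavier noise
    noise_value=(32, 128),           # value range the noise pixels are scaled into
    noise_sparsity=(0.4, 0.6),       # how far the noise extends from the chosen side
    noise_concentration=(0.2, 0.4),  # threshold controlling how much noise survives
)

image_noisy = badphotocopy(image)
```

Unlike noise types 1-4, which build the mask from per-cluster scikit-learn `make_blobs` sampling, the Gaussian branch sums a few downscaled `np.random.normal` fields resized to the page, thresholds them against `noise_concentration`, and then attenuates them with the side-dependent sparsity mask.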