From ced1b6dfac6fa46dc35d354e376e04da5b3442d2 Mon Sep 17 00:00:00 2001
From: Voldie
Date: Wed, 28 Feb 2024 19:11:10 +0100
Subject: [PATCH] Fixed image size, fileprefix and other minor changes

---
 .gitignore                               |  6 ++++-
 README.md                                |  2 +-
 superresolution/SuperResolution.py       | 20 +++++++++-----
 superresolution/core/common.py           |  3 ---
 superresolution/util/dataProcessing.py   | 33 +++++++++++++++++-------
 superresolution/util/image.py            |  3 ++-
 superresolution/util/trainingcallback.py |  8 +++---
 7 files changed, 49 insertions(+), 26 deletions(-)

diff --git a/.gitignore b/.gitignore
index 0f96564..66fb01e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -113,8 +113,12 @@ experiments/
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 .idea/
 
-commandline
 */cpkt*
+*.index
+*.pb
+*.data-*
+
+commandline
 *.png
 *.json
 *.txt
diff --git a/README.md b/README.md
index 030f0e7..322a968 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ A SuperResolution training program for creating/training upscaling machine learn
 ### EDSR - Enhanced Deep Residual Networks for Single Image Super-Resolution
 
 ```bash
-python superresolution/SuperResolution.py --batch-size 16 --epochs 10 --image-size 128 128 --model edsr --learning-rate 0.0003 --color-space rgb --loss-fn msa --shuffle-data-set-size 512 --show-psnr --data-set-directory /path_to_training_data/ --output-dir image-super-resolution-result/
+python superresolution/SuperResolution.py --batch-size 16 --epochs 10 --image-size 128 128 --model edsr --learning-rate 0.0003 --decay-rate 0.9 --decay-step 10000 --color-space rgb --loss-fn msa --shuffle-data-set-size 1024 --show-psnr --data-set-directory /path_to_training_data/ --output-dir image-super-resolution-result/
 ```
 
 ![Gangsta Anime EDSR Super Resolution Example from Trained model](https://github.com/voldien/SuperResolution/assets/9608088/1951a0c3-cebb-4ea8-818e-4a04bf28e116)
diff --git a/superresolution/SuperResolution.py b/superresolution/SuperResolution.py
index 35f608f..ae19ec0 100755
--- a/superresolution/SuperResolution.py
+++ b/superresolution/SuperResolution.py
@@ -186,8 +186,10 @@ def run_train_model(args: dict, training_dataset: Dataset, validation_dataset: D
 
 	# Create Input and Output Size
 	# TODO determine size.
+	resolution_upscale_constant: int = 2
 	image_input_size = (
-		int(args.image_size[0] / 2), int(args.image_size[1] / 2), args.color_channels)
+		int(args.image_size[0] / resolution_upscale_constant), int(args.image_size[1] / resolution_upscale_constant),
+		args.color_channels)
 
 	image_output_size = (
 		args.image_size[0], args.image_size[1], args.color_channels)
@@ -246,7 +248,6 @@ def run_train_model(args: dict, training_dataset: Dataset, validation_dataset: D
 	validation_data_ds = configure_dataset_performance(ds=validation_dataset, use_cache=False, cache_path=None,
 														shuffle_size=0)
-
 	# Apply data augmentation
 	validation_data_ds = augment_dataset(dataset=validation_data_ds,
 										 image_crop_shape=image_output_size)
 
@@ -318,9 +319,15 @@ def run_train_model(args: dict, training_dataset: Dataset, validation_dataset: D
 	example_result_call_back = SaveExampleResultImageCallBack(
 		args.output_dir,
 		non_augmented_dataset_train, args.color_space,
-		nth_batch_sample=args.example_batch, grid_size=args.example_batch_grid_size)
+		nth_batch_sample=args.example_nth_batch, grid_size=args.example_nth_batch_grid_size)
 	training_callbacks.append(example_result_call_back)
 
+	# Debug output of trained data.
+	#training_callbacks.append(SaveExampleResultImageCallBack(
+	#	args.output_dir,
+	#	training_dataset, args.color_space, fileprefix="trainSuperResolution",
+	#	nth_batch_sample=args.example_nth_batch, grid_size=args.example_nth_batch_grid_size))
+
 	composite_train_callback = CompositeImageResultCallBack(
 		dir_path=args.output_dir,
 		name="train",
@@ -396,14 +403,13 @@ def dcsuperresolution_program(vargs=None):
 	parser.add_argument('--output-dir', type=str, dest='output_dir',
 						default=str.format("super-resolution-{0}", date.today().strftime("%b-%d-%Y_%H:%M:%S")),
 						help='Set the output directory that all the models and results will be stored at')
 
-	#
-	parser.add_argument('--example-batch', dest='example_batch', required=False,  # TODO rename
+	parser.add_argument('--example-batch', dest='example_nth_batch', required=False,  # TODO rename
 						type=int, default=1024,
 						help='Set the number of train batches between saving work in progress result.')
 
 	#
-	parser.add_argument('--example-batch-grid-size', dest='example_batch_grid_size',
+	parser.add_argument('--example-batch-grid-size', dest='example_nth_batch_grid_size',
 						type=int, required=False, default=8,
 						help='Set the grid size of number of example images.')
 
@@ -465,7 +471,7 @@ def dcsuperresolution_program(vargs=None):
 
 	sr_logger.info(str.format("Use RAM Cache: {0}", args.cache_ram))
 
-	sr_logger.info(str.format("Example Batch Grid Size: {0}", args.example_batch_grid_size))
+	sr_logger.info(str.format("Example Batch Grid Size: {0}", args.example_nth_batch_grid_size))
 	sr_logger.info(str.format("Image Training Set: {0}", args.image_size))
 	sr_logger.info(str.format("Learning Rate: {0}", args.learning_rate))
 	sr_logger.info(str.format(
diff --git a/superresolution/core/common.py b/superresolution/core/common.py
index d4cf5a4..99a1c42 100644
--- a/superresolution/core/common.py
+++ b/superresolution/core/common.py
@@ -93,9 +93,6 @@ def DefaultArgumentParser() -> argparse.ArgumentParser:
 	#
 	parser.add_argument('--seed', type=int, default=randrange(10000000), dest='seed',
 						help='Set the random seed')
-
-	parser.add_argument('--nr_image_example_generate', type=int, default=16, dest='num_examples_to_generate',
-						help='Number')
 	#
 	parser.add_argument('--color-space', type=str, default="rgb", dest='color_space', choices=['rgb', 'lab'],
 						help='Select Color Space used in the model.')
diff --git a/superresolution/util/dataProcessing.py b/superresolution/util/dataProcessing.py
index 287eb6f..e038772 100644
--- a/superresolution/util/dataProcessing.py
+++ b/superresolution/util/dataProcessing.py
@@ -98,21 +98,22 @@ def augment_dataset(dataset: Dataset, image_crop_shape: tuple) -> Dataset:
 	:return: Augmented DataSet
 	"""
 	trainAug = tf.keras.Sequential([
-		# Select random section of Image.
-		tf.keras.layers.RandomCrop(
-			image_crop_shape[0], image_crop_shape[1]),
-		# Flip image around on each axis randomly.
-		layers.RandomFlip("horizontal_and_vertical"),
 		# Random Zoom.
 		layers.RandomZoom(
-			height_factor=(-0.05, 0.05),
-			width_factor=(-0.05, 0.05),
+			height_factor=(-0.1, 0.1),
+			width_factor=(-0.1, 0.1),
 			fill_mode='reflect',
 			interpolation='bilinear'),
 		# Random Rotation.
 		layers.RandomRotation(factor=0.65,
 							  fill_mode='reflect',
-							  interpolation='bilinear')
+							  interpolation='bilinear'),
+		# Select random section of Image.
+		tf.keras.layers.RandomCrop(
+			image_crop_shape[0], image_crop_shape[1]),
+		# Flip image around on each axis randomly.
+		layers.RandomFlip("horizontal_and_vertical"),
+
 	])
 
 	def AgumentFunc(x):
@@ -127,7 +128,7 @@ def AgumentFunc(x):
 	return dataset
 
 
-def dataset_super_resolution(dataset: Dataset, input_size, output_size) -> Dataset:
+def dataset_super_resolution(dataset: Dataset, input_size: tuple, output_size: tuple) -> Dataset:
 	"""
 	Perform Super Resolution Data and Expected Data to Correct Size.
 	For providing the model with corrected sized Data.
@@ -150,6 +151,14 @@ def DownScaleLayer(data):
 				interpolation='bilinear',
 				crop_to_aspect_ratio=False
 			)])
+
+		expectedScale = tf.keras.Sequential([
+			layers.Resizing(
+				output_size[0],
+				output_size[1],
+				interpolation='bilinear',
+				crop_to_aspect_ratio=False
+			)])
 
 		# Create a copy to prevent augmentation be done twice separately.
 		expected = tf.identity(data)
@@ -161,7 +170,11 @@ def DownScaleLayer(data):
 		# Remap from [0, 1] to [-1,1]
 		data = (2.0 * data) - 1.0
 
-		return data, expected
+		expected_data = (expected + 1.0) * 0.5
+		expected_data = expectedScale(expected_data)
+		expected_data = (2.0 * expected_data) - 1.0
+
+		return data, expected_data
 
 	DownScaledDataSet = (
 		dataset
diff --git a/superresolution/util/image.py b/superresolution/util/image.py
index 0b666bd..72df306 100644
--- a/superresolution/util/image.py
+++ b/superresolution/util/image.py
@@ -2,9 +2,10 @@
 from util.util import convert_nontensor_color_space
 import tensorflow as tf
 from numpy import asarray
+from tensorflow.python.data import Dataset
 
 
-def show_expect_predicted_result(model: tf.keras.Model, image_batch_dataset, color_space: str, nr_col=6):
+def show_expect_predicted_result(model: tf.keras.Model, image_batch_dataset: Dataset, color_space: str, nr_col=6):
 	batch_iter = iter(image_batch_dataset)
 	data_image_batch, expected_image_batch = batch_iter.next()
 
diff --git a/superresolution/util/trainingcallback.py b/superresolution/util/trainingcallback.py
index 3037eef..ce13d9a 100644
--- a/superresolution/util/trainingcallback.py
+++ b/superresolution/util/trainingcallback.py
@@ -13,7 +13,7 @@ def compute_normalized_PSNR(orignal, data):
 
 
 class SaveExampleResultImageCallBack(tf.keras.callbacks.Callback):
 
-	def __init__(self, dir_path, train_data_subset, color_space: str, nth_batch_sample: int = 512, grid_size: int = 6,
+	def __init__(self, dir_path, train_data_subset, color_space: str, nth_batch_sample: int = 512, grid_size: int = 6, fileprefix: str = "SuperResolution",
 				 **kwargs):
 		super(tf.keras.callbacks.Callback, self).__init__(**kwargs)
@@ -26,6 +26,8 @@ def __init__(self, dir_path, train_data_subset, color_space: str, nth_batch_samp
 		self.color_space = color_space
 		self.grid_size = grid_size
 
+		self.fileprefix = fileprefix
+
 		if not os.path.exists(self.dir_path):
 			os.mkdir(self.dir_path)
 
@@ -35,7 +37,7 @@ def on_epoch_begin(self, epoch, logs=None):
 
 	def on_epoch_end(self, epoch, logs=None):
 		fig = show_expect_predicted_result(model=self.model, image_batch_dataset=self.trainSet,
 										   color_space=self.color_space, nr_col=self.grid_size)
-		fig.savefig(os.path.join(self.dir_path, "SuperResolution{0}.png".format(epoch)))
+		fig.savefig(os.path.join(self.dir_path, "{0}{1}.png".format(self.fileprefix, epoch)))
 		fig.clf()
 		plt.close(fig)
@@ -43,7 +45,7 @@ def on_train_batch_end(self, batch, logs=None):
 		if batch % self.nth_batch_sample == 0:
 			fig = show_expect_predicted_result(model=self.model, image_batch_dataset=self.trainSet,
 											   color_space=self.color_space, nr_col=self.grid_size)
-			fig.savefig(os.path.join(self.dir_path, "SuperResolution_{0}_{1}.png".format(self.current_epoch, batch)))
+			fig.savefig(os.path.join(self.dir_path, "{0}_{1}_{2}.png".format(self.fileprefix, self.current_epoch, batch)))
 			fig.clf()
 			plt.close(fig)
 