Fixed image size, fileprefix and other minor changes
voldien committed Feb 28, 2024
1 parent bdc0eab commit ced1b6d
Showing 7 changed files with 49 additions and 26 deletions.
6 changes: 5 additions & 1 deletion .gitignore
@@ -113,8 +113,12 @@ experiments/
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 .idea/
 
-commandline
+*/cpkt*
+*.index
+*.pb
+*.data-*
+
 commandline
 *.png
 *.json
 *.txt
2 changes: 1 addition & 1 deletion README.md
@@ -25,7 +25,7 @@ A SuperResolution training program for creating/training upscaling machine learn
 ### EDSR - Enhanced Deep Residual Networks for Single Image Super-Resolution
 
 ```bash
-python superresolution/SuperResolution.py --batch-size 16 --epochs 10 --image-size 128 128 --model edsr --learning-rate 0.0003 --color-space rgb --loss-fn msa --shuffle-data-set-size 512 --show-psnr --data-set-directory /path_to_training_data/ --output-dir image-super-resolution-result/
+python superresolution/SuperResolution.py --batch-size 16 --epochs 10 --image-size 128 128 --model edsr --learning-rate 0.0003 --decay-rate 0.9 --decay-step 10000 --color-space rgb --loss-fn msa --shuffle-data-set-size 1024 --show-psnr --data-set-directory /path_to_training_data/ --output-dir image-super-resolution-result/
 ```
 
 ![Gangsta Anime EDSR Super Resolution Example from Trained model](https://github.com/voldien/SuperResolution/assets/9608088/1951a0c3-cebb-4ea8-818e-4a04bf28e116)
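The updated command introduces `--decay-rate` and `--decay-step`, which suggest an exponential learning-rate decay schedule. A minimal sketch of the likely equivalent in TensorFlow, assuming the script maps these flags onto the standard `ExponentialDecay` schedule (the actual wiring inside `SuperResolution.py` may differ):

```python
import tensorflow as tf

# Assumed equivalent of --learning-rate 0.0003 --decay-rate 0.9 --decay-step 10000:
# multiply the learning rate by 0.9 every 10000 optimizer steps.
schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.0003,
    decay_steps=10000,
    decay_rate=0.9)
optimizer = tf.keras.optimizers.Adam(learning_rate=schedule)
```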
20 changes: 13 additions & 7 deletions superresolution/SuperResolution.py
@@ -186,8 +186,10 @@ def run_train_model(args: dict, training_dataset: Dataset, validation_dataset: D

     # Create Input and Output Size
     # TODO determine size.
+    resolution_upscale_constant: int = 2
     image_input_size = (
-        int(args.image_size[0] / 2), int(args.image_size[1] / 2), args.color_channels)
+        int(args.image_size[0] / resolution_upscale_constant), int(args.image_size[1] / resolution_upscale_constant),
+        args.color_channels)
     image_output_size = (
         args.image_size[0], args.image_size[1], args.color_channels)
@@ -246,7 +248,6 @@ def run_train_model(args: dict, training_dataset: Dataset, validation_dataset: D
     validation_data_ds = configure_dataset_performance(ds=validation_dataset, use_cache=False,
                                                        cache_path=None,
                                                        shuffle_size=0)
-
     # Apply data augmentation
     validation_data_ds = augment_dataset(dataset=validation_data_ds, image_crop_shape=image_output_size)
 
@@ -318,9 +319,15 @@ def run_train_model(args: dict, training_dataset: Dataset, validation_dataset: D
     example_result_call_back = SaveExampleResultImageCallBack(
         args.output_dir,
         non_augmented_dataset_train, args.color_space,
-        nth_batch_sample=args.example_batch, grid_size=args.example_batch_grid_size)
+        nth_batch_sample=args.example_nth_batch, grid_size=args.example_nth_batch_grid_size)
     training_callbacks.append(example_result_call_back)
 
+    # Debug output of trained data.
+    # training_callbacks.append(SaveExampleResultImageCallBack(
+    #     args.output_dir,
+    #     training_dataset, args.color_space, fileprefix="trainSuperResolution",
+    #     nth_batch_sample=args.example_nth_batch, grid_size=args.example_nth_batch_grid_size))
+
     composite_train_callback = CompositeImageResultCallBack(
         dir_path=args.output_dir,
         name="train",
@@ -396,14 +403,13 @@ def dcsuperresolution_program(vargs=None):
     parser.add_argument('--output-dir', type=str, dest='output_dir',
                         default=str.format("super-resolution-{0}", date.today().strftime("%b-%d-%Y_%H:%M:%S")),
                         help='Set the output directory that all the models and results will be stored at')
-
     #
-    parser.add_argument('--example-batch', dest='example_batch', required=False,  # TODO rename
+    parser.add_argument('--example-batch', dest='example_nth_batch', required=False,  # TODO rename
                         type=int,
                         default=1024,
                         help='Set the number of train batches between saving work in progress result.')
     #
-    parser.add_argument('--example-batch-grid-size', dest='example_batch_grid_size',
+    parser.add_argument('--example-batch-grid-size', dest='example_nth_batch_grid_size',
                         type=int, required=False,
                         default=8, help='Set the grid size of number of example images.')
 
@@ -465,7 +471,7 @@ def dcsuperresolution_program(vargs=None):

sr_logger.info(str.format("Use RAM Cache: {0}", args.cache_ram))

sr_logger.info(str.format("Example Batch Grid Size: {0}", args.example_batch_grid_size))
sr_logger.info(str.format("Example Batch Grid Size: {0}", args.example_nth_batch_grid_size))
sr_logger.info(str.format("Image Training Set: {0}", args.image_size))
sr_logger.info(str.format("Learning Rate: {0}", args.learning_rate))
sr_logger.info(str.format(
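Note that the hunks above rename only the argparse `dest` values, so existing command lines keep working while the parsed attributes take the new names. A standalone sketch of the pattern (hypothetical minimal parser, not the script's full argument set):

```python
import argparse

parser = argparse.ArgumentParser()
# The flag spelling is unchanged; only the attribute name on the parsed
# namespace is renamed from example_batch to example_nth_batch.
parser.add_argument('--example-batch', dest='example_nth_batch',
                    type=int, default=1024)
parser.add_argument('--example-batch-grid-size', dest='example_nth_batch_grid_size',
                    type=int, default=8)

args = parser.parse_args(['--example-batch', '512'])
print(args.example_nth_batch)            # 512
print(args.example_nth_batch_grid_size)  # 8
```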
3 changes: 0 additions & 3 deletions superresolution/core/common.py
@@ -93,9 +93,6 @@ def DefaultArgumentParser() -> argparse.ArgumentParser:
     #
     parser.add_argument('--seed', type=int, default=randrange(10000000), dest='seed',
                         help='Set the random seed')
-
-    parser.add_argument('--nr_image_example_generate', type=int, default=16, dest='num_examples_to_generate',
-                        help='Number')
     #
     parser.add_argument('--color-space', type=str, default="rgb", dest='color_space', choices=['rgb', 'lab'],
                         help='Select Color Space used in the model.')
33 changes: 23 additions & 10 deletions superresolution/util/dataProcessing.py
@@ -98,21 +98,22 @@ def augment_dataset(dataset: Dataset, image_crop_shape: tuple) -> Dataset:
     :return: Augmented DataSet
     """
     trainAug = tf.keras.Sequential([
-        # Select random section of Image.
-        tf.keras.layers.RandomCrop(
-            image_crop_shape[0], image_crop_shape[1]),
-        # Flip image around on each axis randomly.
-        layers.RandomFlip("horizontal_and_vertical"),
         # Random Zoom.
         layers.RandomZoom(
-            height_factor=(-0.05, 0.05),
-            width_factor=(-0.05, 0.05),
+            height_factor=(-0.1, 0.1),
+            width_factor=(-0.1, 0.1),
             fill_mode='reflect',
             interpolation='bilinear'),
         # Random Rotation.
         layers.RandomRotation(factor=0.65,
                               fill_mode='reflect',
-                              interpolation='bilinear')
+                              interpolation='bilinear'),
+        # Select random section of Image.
+        tf.keras.layers.RandomCrop(
+            image_crop_shape[0], image_crop_shape[1]),
+        # Flip image around on each axis randomly.
+        layers.RandomFlip("horizontal_and_vertical"),
+
     ])
 
     def AgumentFunc(x):
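Reordered, the pipeline zooms and rotates first and crops/flips last, so the final crop samples from the already-transformed image. A self-contained sketch of the new ordering, assuming a 128x128 crop and a dummy batch:

```python
import tensorflow as tf
from tensorflow.keras import layers

train_aug = tf.keras.Sequential([
    layers.RandomZoom(height_factor=(-0.1, 0.1), width_factor=(-0.1, 0.1),
                      fill_mode='reflect', interpolation='bilinear'),
    layers.RandomRotation(factor=0.65, fill_mode='reflect',
                          interpolation='bilinear'),
    layers.RandomCrop(128, 128),
    layers.RandomFlip("horizontal_and_vertical"),
])

images = tf.random.uniform((4, 256, 256, 3))  # dummy image batch
augmented = train_aug(images, training=True)  # -> shape (4, 128, 128, 3)
```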
@@ -127,7 +128,7 @@ def AgumentFunc(x):
     return dataset
 
 
-def dataset_super_resolution(dataset: Dataset, input_size, output_size) -> Dataset:
+def dataset_super_resolution(dataset: Dataset, input_size: tuple, output_size: tuple) -> Dataset:
     """
     Perform Super Resolution Data and Expected Data to Correct Size. For providing
     the model with corrected sized Data.
@@ -150,6 +151,14 @@ def DownScaleLayer(data):
                 interpolation='bilinear',
                 crop_to_aspect_ratio=False
             )])
+
+        expectedScale = tf.keras.Sequential([
+            layers.Resizing(
+                output_size[0],
+                output_size[1],
+                interpolation='bilinear',
+                crop_to_aspect_ratio=False
+            )])
 
         # Create a copy to prevent augmentation be done twice separately.
         expected = tf.identity(data)
@@ -161,7 +170,11 @@ def DownScaleLayer(data):
         # Remap from [0, 1] to [-1,1]
         data = (2.0 * data) - 1.0
 
-        return data, expected
+        expected_data = (expected + 1.0) * 0.5
+        expected_data = expectedScale(expected_data)
+        expected_data = (2.0 * expected_data) - 1.0
+
+        return data, expected_data
 
     DownScaledDataSet = (
         dataset
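The new `expectedScale` pass gives the target tensor a normalize-resize-renormalize treatment symmetric to the input's: map into [0, 1] for `Resizing`, then back to [-1, 1] for the model. A self-contained sketch of that round trip (sizes and the input range are assumptions; the folded-away parts of `DownScaleLayer` may differ):

```python
import tensorflow as tf
from tensorflow.keras import layers

input_size, output_size = (64, 64), (128, 128)  # assumed 2x training pair

downscale = tf.keras.Sequential(
    [layers.Resizing(input_size[0], input_size[1], interpolation='bilinear')])
expectedScale = tf.keras.Sequential(
    [layers.Resizing(output_size[0], output_size[1], interpolation='bilinear')])

def make_pair(image):                          # image assumed in [0, 1]
    expected = tf.identity(image)              # copy so transforms are not shared
    data = downscale(image)                    # low-resolution model input
    data = (2.0 * data) - 1.0                  # remap [0, 1] -> [-1, 1]
    expected_data = expectedScale(expected)    # enforce the exact target size
    expected_data = (2.0 * expected_data) - 1.0
    return data, expected_data

lr, hr = make_pair(tf.random.uniform((1, 140, 140, 3)))
print(lr.shape, hr.shape)  # (1, 64, 64, 3) (1, 128, 128, 3)
```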
3 changes: 2 additions & 1 deletion superresolution/util/image.py
@@ -2,9 +2,10 @@
 from util.util import convert_nontensor_color_space
 import tensorflow as tf
 from numpy import asarray
+from tensorflow.python.data import Dataset
 
 
-def show_expect_predicted_result(model: tf.keras.Model, image_batch_dataset, color_space: str, nr_col=6):
+def show_expect_predicted_result(model: tf.keras.Model, image_batch_dataset: Dataset, color_space: str, nr_col=6):
     batch_iter = iter(image_batch_dataset)
     data_image_batch, expected_image_batch = batch_iter.next()
 
8 changes: 5 additions & 3 deletions superresolution/util/trainingcallback.py
@@ -13,7 +13,7 @@ def compute_normalized_PSNR(orignal, data):

 class SaveExampleResultImageCallBack(tf.keras.callbacks.Callback):
 
-    def __init__(self, dir_path, train_data_subset, color_space: str, nth_batch_sample: int = 512, grid_size: int = 6,
+    def __init__(self, dir_path, train_data_subset, color_space: str, nth_batch_sample: int = 512, grid_size: int = 6, fileprefix: str = "SuperResolution",
                  **kwargs):
         super(tf.keras.callbacks.Callback, self).__init__(**kwargs)
 
@@ -26,6 +26,8 @@ def __init__(self, dir_path, train_data_subset, color_space: str, nth_batch_samp
         self.color_space = color_space
         self.grid_size = grid_size
 
+        self.fileprefix = fileprefix
+
         if not os.path.exists(self.dir_path):
             os.mkdir(self.dir_path)
 
@@ -35,15 +37,15 @@ def on_epoch_begin(self, epoch, logs=None):
     def on_epoch_end(self, epoch, logs=None):
         fig = show_expect_predicted_result(model=self.model, image_batch_dataset=self.trainSet,
                                            color_space=self.color_space, nr_col=self.grid_size)
-        fig.savefig(os.path.join(self.dir_path, "SuperResolution{0}.png".format(epoch)))
+        fig.savefig(os.path.join(self.dir_path, "{0}{1}.png".format(self.fileprefix, epoch)))
         fig.clf()
         plt.close(fig)
 
     def on_train_batch_end(self, batch, logs=None):
         if batch % self.nth_batch_sample == 0:
             fig = show_expect_predicted_result(model=self.model, image_batch_dataset=self.trainSet,
                                                color_space=self.color_space, nr_col=self.grid_size)
-            fig.savefig(os.path.join(self.dir_path, "SuperResolution_{0}_{1}.png".format(self.current_epoch, batch)))
+            fig.savefig(os.path.join(self.dir_path, "{0}_{1}_{2}.png".format(self.fileprefix, self.current_epoch, batch)))
             fig.clf()
             plt.close(fig)