diff --git a/data/single_dataset.py b/data/single_dataset.py
index 2111486..f3bb46e 100644
--- a/data/single_dataset.py
+++ b/data/single_dataset.py
@@ -37,6 +37,7 @@ def __getitem__(self, index):
"""
A_path = self.A_paths[index]
A_img = Image.open(A_path).convert('RGB')
+ self.opt.W, self.opt.H = A_img.size
transform_params_A = get_params(self.opt, A_img.size)
A = get_transform(self.opt, transform_params_A, grayscale=(self.input_nc == 1))(A_img)
item = {'A': A, 'A_paths': A_path}
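For context, the change above records the original photo size on `opt` so the saved result can later be resized back to the input resolution (see the `test.py` and `util/visualizer.py` hunks below). A minimal sketch of the pattern, with an illustrative helper name that is not part of the repo:

```python
# Sketch only: assumes `opt` is the options namespace used throughout the repo.
from PIL import Image

def load_and_record_size(opt, image_path):
    """Open a test photo and stash its original (width, height) on opt."""
    img = Image.open(image_path).convert('RGB')
    opt.W, opt.H = img.size  # PIL reports size as (width, height)
    return img
```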
diff --git a/imgs/architecture.jpg b/imgs/architecture.jpg
new file mode 100644
index 0000000..fa16a32
Binary files /dev/null and b/imgs/architecture.jpg differ
diff --git a/imgs/architecture.png b/imgs/architecture.png
deleted file mode 100644
index 97ec7cd..0000000
Binary files a/imgs/architecture.png and /dev/null differ
diff --git a/imgs/how_to_crop.jpg b/imgs/how_to_crop.jpg
index 515de73..e81eb98 100644
Binary files a/imgs/how_to_crop.jpg and b/imgs/how_to_crop.jpg differ
diff --git a/imgs/result_html.jpg b/imgs/result_html.jpg
new file mode 100644
index 0000000..d77b6cf
Binary files /dev/null and b/imgs/result_html.jpg differ
diff --git a/imgs/test1/cropped2.jpg b/imgs/test1/cropped2.jpg
new file mode 100644
index 0000000..f72adbb
Binary files /dev/null and b/imgs/test1/cropped2.jpg differ
diff --git a/readme.md b/readme.md
index 3ac27b2..acd972c 100755
--- a/readme.md
+++ b/readme.md
@@ -8,7 +8,7 @@ This project generates artistic portrait drawings from face photos using a GAN-b
## Our Proposed Framework
-
+
## Sample Results
From left to right: input, output(style1), output(style2), output(style3)
@@ -34,7 +34,7 @@ If you use this code for your research, please cite our paper.
## Installation
-- Install PyTorch 1.1.0 and torchvision from http://pytorch.org and other dependencies (e.g., [visdom](https://github.com/facebookresearch/visdom) and [dominate](https://github.com/Knio/dominate)). You can install all the dependencies by
+- To install the dependencies, run
```bash
pip install -r requirements.txt
```
@@ -55,7 +55,7 @@ The result images are saved in `./results/pretrained/test_200/images3styles`,
where `real`, `fake1`, `fake2`, `fake3` correspond to input face photo, style1 drawing, style2 drawing, style3 drawing respectively.
-- 3. To test on your own photos, the photos need to be square (since the program will load it and resized as 512x512). You can use an image editor to crop a square area of your photo that contains face (or use an optional preprocess [here](preprocess/readme.md)). Then specify the folder that contains test photos using `--dataroot`, specify save folder name using `--savefolder` and run the above command again:
+- 3. To test on your own photos: first use an image editor to crop the face region of your photo (or use the optional preprocessing steps [here](preprocess/readme.md)). Then specify the folder containing your test photos with `--dataroot`, specify the save folder name with `--savefolder`, and run the above command again:
``` bash
# with GPU
@@ -67,6 +67,8 @@ python test_seq_style.py --gpu -1 --dataroot ./imgs/test1 --savefolder 3styles_t
```
The test results will be saved to a html file here: `./results/pretrained/test_200/index[save_folder_name].html`.
The result images are saved in `./results/pretrained/test_200/images[save_folder_name]`.
+A screenshot of an example HTML results page is shown below:
+
You can contact email yr16@mails.tsinghua.edu.cn for any questions.
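The updated instructions above ask for a cropped face region before testing. One simple way to do this programmatically is a centered square crop with PIL, assuming the face is roughly centered; the helper name and paths below are illustrative only, and the repo's own `preprocess/readme.md` describes the proper preprocessing:

```python
from PIL import Image

def center_square_crop(in_path, out_path):
    """Crop the largest centered square from a photo before running test_seq_style.py."""
    img = Image.open(in_path).convert('RGB')
    w, h = img.size
    side = min(w, h)
    left, top = (w - side) // 2, (h - side) // 2
    img.crop((left, top, left + side, top + side)).save(out_path)

# e.g. center_square_crop('photo.jpg', './imgs/test1/photo_cropped.jpg')
```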
diff --git a/test.py b/test.py
index bbd6f02..bc9b256 100644
--- a/test.py
+++ b/test.py
@@ -63,5 +63,5 @@
img_path = model.get_image_paths() # get image paths
if i % 5 == 0: # save images to an HTML file
print('processing (%04d)-th image... %s' % (i, img_path))
- save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
+ save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize, W=opt.W, H=opt.H)
webpage.save() # save the HTML
diff --git a/util/visualizer.py b/util/visualizer.py
index e4d0ae2..2d9c9ab 100644
--- a/util/visualizer.py
+++ b/util/visualizer.py
@@ -5,9 +5,7 @@
import time
from . import util, html
from subprocess import Popen, PIPE
-from scipy.misc import imresize
-import pdb
-from scipy.io import savemat
+from PIL import Image
if sys.version_info[0] == 2:
VisdomExceptionBase = Exception
@@ -15,7 +13,7 @@
VisdomExceptionBase = ConnectionError
-def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
+def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256, W=None, H=None):
"""Save images to the disk.
Parameters:
@@ -37,16 +35,16 @@ def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
for label, im_data in visuals.items():
## tensor to im
im = util.tensor2im(im_data)
- #im,imo = util.tensor2im(im_data)
- #matname = os.path.join(image_dir, '%s_%s.mat' % (name, label))
- #savemat(matname,{'imo':imo})
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(image_dir, image_name)
h, w, _ = im.shape
- if aspect_ratio > 1.0:
- im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')
- if aspect_ratio < 1.0:
- im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')
+ if W is not None and H is not None and (W != w or H != h):
+ im = np.array(Image.fromarray(im).resize((W, H), Image.BICUBIC))
+ else:
+ if aspect_ratio > 1.0:
+ im = np.array(Image.fromarray(im).resize((int(w * aspect_ratio), h), Image.BICUBIC))
+ if aspect_ratio < 1.0:
+ im = np.array(Image.fromarray(im).resize((w, int(h / aspect_ratio)), Image.BICUBIC))
util.save_image(im, save_path)
ims.append(image_name)
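`scipy.misc.imresize` was removed in SciPy 1.3.0, so the hunk above swaps it for `PIL.Image.resize` and, when the original photo size is known, resizes the result back to that size. A minimal standalone sketch of the same logic (assumes `im` is an HxWx3 uint8 array; the helper name is illustrative, not the repo's API):

```python
import numpy as np
from PIL import Image

def resize_back(im, W=None, H=None, aspect_ratio=1.0):
    """Resize a result image to the original photo size if known, else apply aspect_ratio."""
    h, w, _ = im.shape
    if W is not None and H is not None and (W != w or H != h):
        return np.array(Image.fromarray(im).resize((W, H), Image.BICUBIC))
    if aspect_ratio > 1.0:
        return np.array(Image.fromarray(im).resize((int(w * aspect_ratio), h), Image.BICUBIC))
    if aspect_ratio < 1.0:
        return np.array(Image.fromarray(im).resize((w, int(h / aspect_ratio)), Image.BICUBIC))
    return im
```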
@@ -133,7 +131,6 @@ def display_current_results(self, visuals, epoch, save_result):
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
label_html_row += '