Overview: this lesson trains a DCGAN to generate face images. The code is close to the previous lesson's and only needs minor adjustments for color images.
Two datasets are used:
- LFW: vis-www.cs.umass.edu/lfw/ , Labeled Faces in the Wild, with more than 13,000 images; 1,680 of the people pictured have two or more images
- CelebA: mmlab.ie.cuhk.edu.hk/projects/Ce… , the CelebFaces Attributes Dataset, with 10,177 identities and more than 200,000 images in total; each image is also annotated with 5 facial landmark locations and 40 binary attributes, e.g. whether the person wears glasses, a hat, or a beard (a parsing sketch follows this list)
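As an aside, the 40 attribute labels ship as a plain-text file; here is a minimal sketch of parsing them, assuming the standard `list_attr_celeba.txt` layout (the training below only uses the raw images, so this helper is purely illustrative):

```python
# Hypothetical helper: parse CelebA's list_attr_celeba.txt.
# Line 1 is the image count, line 2 the 40 attribute names,
# and each following line is "000001.jpg -1 1 ..." with -1/1 per attribute.
def load_celeba_attrs(path='list_attr_celeba.txt'):
    with open(path) as f:
        f.readline()                       # skip the image count
        attr_names = f.readline().split()  # 40 attribute names
        attrs = {}
        for line in f:
            parts = line.split()
            attrs[parts[0]] = [int(v) for v in parts[1:]]
    return attr_names, attrs
```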
Implementation
The code is much like the previous lesson's; it only needs minor adjustments for color images.
Load the libraries
```python
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import urllib.request
import tarfile
import os
import matplotlib.pyplot as plt
%matplotlib inline
from imageio import imread, imsave, mimsave
from scipy.misc import imresize  # requires SciPy < 1.3; removed in later versions
import glob
```
Download and extract the LFW data; the CelebA data is assumed to be already prepared.
```python
url = 'http://vis-www.cs.umass.edu/lfw/lfw.tgz'
filename = 'lfw.tgz'
directory = 'lfw_imgs'
new_dir = 'lfw_new_imgs'

if not os.path.isdir(new_dir):
    os.mkdir(new_dir)

if not os.path.isdir(directory):
    if not os.path.isfile(filename):
        urllib.request.urlretrieve(url, filename)
    tar = tarfile.open(filename, 'r:gz')
    tar.extractall(path=directory)
    tar.close()

# Flatten the per-person directory tree into a single folder of numbered PNGs
count = 0
for dir_, _, files in os.walk(directory):
    for file_ in files:
        img = imread(os.path.join(dir_, file_))
        imsave(os.path.join(new_dir, '%d.png' % count), img)
        count += 1
```
Choose the dataset used for face generation.
```python
# dataset = 'lfw_new_imgs'  # LFW
dataset = 'celeba'  # CelebA
images = glob.glob(os.path.join(dataset, '*.*'))
print(len(images))
```
Define some constants, the network inputs, and helper functions.
```python
batch_size = 100
z_dim = 100
WIDTH = 64
HEIGHT = 64

OUTPUT_DIR = 'samples_' + dataset
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)

X = tf.placeholder(dtype=tf.float32, shape=[None, HEIGHT, WIDTH, 3], name='X')
noise = tf.placeholder(dtype=tf.float32, shape=[None, z_dim], name='noise')
is_training = tf.placeholder(dtype=tf.bool, name='is_training')

def lrelu(x, leak=0.2):
    return tf.maximum(x, leak * x)

def sigmoid_cross_entropy_with_logits(x, y):
    return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)
```
The discriminator
```python
def discriminator(image, reuse=None, is_training=is_training):
    momentum = 0.9
    with tf.variable_scope('discriminator', reuse=reuse):
        # 64x64 -> 32x32
        h0 = lrelu(tf.layers.conv2d(image, kernel_size=5, filters=64, strides=2, padding='same'))
        # 32x32 -> 16x16
        h1 = tf.layers.conv2d(h0, kernel_size=5, filters=128, strides=2, padding='same')
        h1 = lrelu(tf.contrib.layers.batch_norm(h1, is_training=is_training, decay=momentum))
        # 16x16 -> 8x8
        h2 = tf.layers.conv2d(h1, kernel_size=5, filters=256, strides=2, padding='same')
        h2 = lrelu(tf.contrib.layers.batch_norm(h2, is_training=is_training, decay=momentum))
        # 8x8 -> 4x4
        h3 = tf.layers.conv2d(h2, kernel_size=5, filters=512, strides=2, padding='same')
        h3 = lrelu(tf.contrib.layers.batch_norm(h3, is_training=is_training, decay=momentum))
        # Flatten and map to a single real/fake logit
        h4 = tf.contrib.layers.flatten(h3)
        h4 = tf.layers.dense(h4, units=1)
        return tf.nn.sigmoid(h4), h4
```
The generator
```python
def generator(z, is_training=is_training):
    momentum = 0.9
    with tf.variable_scope('generator', reuse=None):
        d = 4
        # Project the noise vector to a 4x4x512 feature map
        h0 = tf.layers.dense(z, units=d * d * 512)
        h0 = tf.reshape(h0, shape=[-1, d, d, 512])
        h0 = tf.nn.relu(tf.contrib.layers.batch_norm(h0, is_training=is_training, decay=momentum))
        # 4x4 -> 8x8
        h1 = tf.layers.conv2d_transpose(h0, kernel_size=5, filters=256, strides=2, padding='same')
        h1 = tf.nn.relu(tf.contrib.layers.batch_norm(h1, is_training=is_training, decay=momentum))
        # 8x8 -> 16x16
        h2 = tf.layers.conv2d_transpose(h1, kernel_size=5, filters=128, strides=2, padding='same')
        h2 = tf.nn.relu(tf.contrib.layers.batch_norm(h2, is_training=is_training, decay=momentum))
        # 16x16 -> 32x32
        h3 = tf.layers.conv2d_transpose(h2, kernel_size=5, filters=64, strides=2, padding='same')
        h3 = tf.nn.relu(tf.contrib.layers.batch_norm(h3, is_training=is_training, decay=momentum))
        # 32x32 -> 64x64; tanh keeps outputs in [-1, 1]
        h4 = tf.layers.conv2d_transpose(h3, kernel_size=5, filters=3, strides=2, padding='same',
                                        activation=tf.nn.tanh, name='g')
        return h4
```
Loss functions
```python
g = generator(noise)
d_real, d_real_logits = discriminator(X)
d_fake, d_fake_logits = discriminator(g, reuse=True)

vars_g = [var for var in tf.trainable_variables() if var.name.startswith('generator')]
vars_d = [var for var in tf.trainable_variables() if var.name.startswith('discriminator')]

# Discriminator: real images labeled 1, generated images labeled 0
loss_d_real = tf.reduce_mean(sigmoid_cross_entropy_with_logits(d_real_logits, tf.ones_like(d_real)))
loss_d_fake = tf.reduce_mean(sigmoid_cross_entropy_with_logits(d_fake_logits, tf.zeros_like(d_fake)))
# Generator: tries to make the discriminator label its images as 1
loss_g = tf.reduce_mean(sigmoid_cross_entropy_with_logits(d_fake_logits, tf.ones_like(d_fake)))
loss_d = loss_d_real + loss_d_fake
```
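For reference, the standard GAN minimax objective being optimized here is

$$\min_G \max_D \; \mathbb{E}_{x \sim p_{\text{data}}}[\log D(x)] + \mathbb{E}_{z \sim p_z}[\log(1 - D(G(z)))]$$

The code uses the common non-saturating variant for the generator: instead of minimizing $\log(1 - D(G(z)))$, `loss_g` maximizes $\log D(G(z))$ by labeling the fake logits with ones, which gives stronger gradients early in training.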
Optimizers. Because tf.contrib.layers.batch_norm maintains moving averages through ops in the tf.GraphKeys.UPDATE_OPS collection, the optimizers are wrapped in a control_dependencies block so those updates run with every training step.
```python
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    optimizer_d = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5).minimize(loss_d, var_list=vars_d)
    optimizer_g = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5).minimize(loss_g, var_list=vars_g)
```
Function for reading images; it center-crops each image to a square before resizing.
```python
def read_image(path, height, width):
    image = imread(path)
    h = image.shape[0]
    w = image.shape[1]
    # Center-crop to a square along the longer side
    if h > w:
        image = image[h // 2 - w // 2: h // 2 + w // 2, :, :]
    else:
        image = image[:, w // 2 - h // 2: w // 2 + h // 2, :]
    image = imresize(image, (height, width))
    return image / 255.
```
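Note that `scipy.misc.imresize` was removed in SciPy 1.3. On newer environments, a drop-in sketch based on Pillow (assuming Pillow is installed) can replace it:

```python
import numpy as np
from PIL import Image

def imresize(image, size):
    # size is (height, width); PIL's resize expects (width, height)
    height, width = size
    return np.array(Image.fromarray(image).resize((width, height), Image.BILINEAR))
```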
Function for tiling a batch of images into one montage image.
```python
def montage(images):
    if isinstance(images, list):
        images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    # Arrange the batch on a roughly square grid with 1-pixel gray borders
    n_plots = int(np.ceil(np.sqrt(images.shape[0])))
    if len(images.shape) == 4 and images.shape[3] == 3:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1, 3)) * 0.5
    elif len(images.shape) == 4 and images.shape[3] == 1:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1, 1)) * 0.5
    elif len(images.shape) == 3:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1)) * 0.5
    else:
        raise ValueError('Could not parse image shape of {}'.format(images.shape))
    for i in range(n_plots):
        for j in range(n_plots):
            this_filter = i * n_plots + j
            if this_filter < images.shape[0]:
                this_img = images[this_filter]
                m[1 + i + i * img_h:1 + i + (i + 1) * img_h,
                  1 + j + j * img_w:1 + j + (j + 1) * img_w] = this_img
    return m
```
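As a quick sanity check, here is a hypothetical usage example of `montage` on random data (not part of the original script):

```python
batch = np.random.rand(16, 64, 64, 3)  # 16 random "images"
grid = montage(batch)
print(grid.shape)  # (261, 261, 3): a 4x4 grid of 64x64 tiles plus 1-pixel borders
```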
Training the model
```python
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Fixed noise vectors, so samples across iterations are directly comparable
z_samples = np.random.uniform(-1.0, 1.0, [batch_size, z_dim]).astype(np.float32)
samples = []
loss = {'d': [], 'g': []}

offset = 0
for i in range(60000):
    n = np.random.uniform(-1.0, 1.0, [batch_size, z_dim]).astype(np.float32)

    offset = (offset + batch_size) % len(images)
    batch = np.array([read_image(img, HEIGHT, WIDTH) for img in images[offset: offset + batch_size]])
    batch = (batch - 0.5) * 2  # rescale from [0, 1] to [-1, 1] to match the tanh output

    d_ls, g_ls = sess.run([loss_d, loss_g], feed_dict={X: batch, noise: n, is_training: True})
    loss['d'].append(d_ls)
    loss['g'].append(g_ls)

    sess.run(optimizer_d, feed_dict={X: batch, noise: n, is_training: True})
    # Update the generator twice per discriminator update
    sess.run(optimizer_g, feed_dict={X: batch, noise: n, is_training: True})
    sess.run(optimizer_g, feed_dict={X: batch, noise: n, is_training: True})

    if i % 500 == 0:
        print(i, d_ls, g_ls)
        gen_imgs = sess.run(g, feed_dict={noise: z_samples, is_training: False})
        gen_imgs = (gen_imgs + 1) / 2
        imgs = [img[:, :, :] for img in gen_imgs]
        gen_imgs = montage(imgs)
        plt.axis('off')
        plt.imshow(gen_imgs)
        imsave(os.path.join(OUTPUT_DIR, 'sample_%d.jpg' % i), gen_imgs)
        plt.show()
        samples.append(gen_imgs)

plt.plot(loss['d'], label='Discriminator')
plt.plot(loss['g'], label='Generator')
plt.legend(loc='upper right')
plt.savefig(os.path.join(OUTPUT_DIR, 'Loss.png'))
plt.show()
mimsave(os.path.join(OUTPUT_DIR, 'samples.gif'), samples, fps=10)
```
The LFW face-generation results are shown below.
The CelebA face-generation results are shown below.
Save the model for later use.
```python
saver = tf.train.Saver()
saver.save(sess, os.path.join(OUTPUT_DIR, 'dcgan_' + dataset), global_step=60000)
```
Using the saved model to generate face images on a standalone machine
```python
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os

batch_size = 100
z_dim = 100

dataset = 'lfw_new_imgs'
# dataset = 'celeba'

def montage(images):
    if isinstance(images, list):
        images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    n_plots = int(np.ceil(np.sqrt(images.shape[0])))
    if len(images.shape) == 4 and images.shape[3] == 3:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1, 3)) * 0.5
    elif len(images.shape) == 4 and images.shape[3] == 1:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1, 1)) * 0.5
    elif len(images.shape) == 3:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1)) * 0.5
    else:
        raise ValueError('Could not parse image shape of {}'.format(images.shape))
    for i in range(n_plots):
        for j in range(n_plots):
            this_filter = i * n_plots + j
            if this_filter < images.shape[0]:
                this_img = images[this_filter]
                m[1 + i + i * img_h:1 + i + (i + 1) * img_h,
                  1 + j + j * img_w:1 + j + (j + 1) * img_w] = this_img
    return m

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Restore the graph definition and the trained weights from the checkpoint
saver = tf.train.import_meta_graph(os.path.join('samples_' + dataset, 'dcgan_' + dataset + '-60000.meta'))
saver.restore(sess, tf.train.latest_checkpoint('samples_' + dataset))

# Look up the generator output and the input placeholders by tensor name
graph = tf.get_default_graph()
g = graph.get_tensor_by_name('generator/g/Tanh:0')
noise = graph.get_tensor_by_name('noise:0')
is_training = graph.get_tensor_by_name('is_training:0')

n = np.random.uniform(-1.0, 1.0, [batch_size, z_dim]).astype(np.float32)
gen_imgs = sess.run(g, feed_dict={noise: n, is_training: False})
gen_imgs = (gen_imgs + 1) / 2  # map tanh outputs from [-1, 1] back to [0, 1]
imgs = [img[:, :, :] for img in gen_imgs]
gen_imgs = montage(imgs)
gen_imgs = np.clip(gen_imgs, 0, 1)
plt.figure(figsize=(8, 8))
plt.axis('off')
plt.imshow(gen_imgs)
plt.show()
```