1.5 Conditional Generative Adversarial Networks (CGAN)

1. What is a CGAN?

During CGAN training, the generator learns to produce realistic samples for each label in the training dataset, while the discriminator learns to distinguish real sample-label pairs from fake ones. The discriminator learns to accept only pairs whose sample is real and whose sample-label match is correct, and to reject mismatched pairs as well as pairs whose sample is fake.
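For reference, the standard CGAN objective (following Mirza and Osindero's formulation) is a conditional version of the usual GAN minimax game:

min_G max_D V(D, G) = E_{x~p_data}[ log D(x, y) ] + E_{z~p_z}[ log(1 - D(G(z, y), y)) ]

where y is the conditioning label, D(x, y) scores a sample-label pair, and G(z, y) is the fake sample generated for label y.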

2. Generator


The conditioning label is denoted y. The generator uses the noise vector z together with the label y to synthesize a fake sample.

3. Discriminator


The discriminator receives real samples together with their labels, (x, y), as well as fake samples produced by the generator, paired with the labels that were used to generate them.


4. Code implementation

4.1 Importing the libraries

%matplotlib inline

import matplotlib.pyplot as plt
import numpy as np

from keras.datasets import mnist
from keras.layers import (Activation, BatchNormalization, Concatenate, Dense,
                          Embedding, Flatten, Input, Multiply, Reshape)
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.models import Model, Sequential
# If this import raises an error, change the line below to: from keras.optimizer_v2 import adam as Adam
from keras.optimizers import Adam

4.2 Model input dimensions

img_rows = 28
img_cols = 28
channels = 1

# Input image dimensions
img_shape = (img_rows, img_cols, channels)

# Size of the noise vector, used as input to the generator
z_dim = 100

# Number of classes in the dataset
num_classes = 10

4.3 CGAN generator

def build_generator(z_dim):

    model = Sequential()

    # Fully connected layer followed by a reshape into a 7*7*256 tensor
    model.add(Dense(256 * 7 * 7, input_dim=z_dim))
    model.add(Reshape((7, 7, 256)))

    # Transposed convolution, from 7*7*256 to a 14*14*128 tensor
    model.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding='same'))

    # Batch normalization
    model.add(BatchNormalization())

    # Leaky ReLU activation
    model.add(LeakyReLU(alpha=0.01))

    # Transposed convolution, from 14*14*128 to a 14*14*64 tensor
    model.add(Conv2DTranspose(64, kernel_size=3, strides=1, padding='same'))

    # Batch normalization
    model.add(BatchNormalization())

    # Leaky ReLU activation
    model.add(LeakyReLU(alpha=0.01))

    # Transposed convolution, from 14*14*64 to a 28*28*1 tensor
    model.add(Conv2DTranspose(1, kernel_size=3, strides=2, padding='same'))

    # Output layer with tanh activation
    model.add(Activation('tanh'))

    return model


def build_cgan_generator(z_dim):

    # Random noise vector z
    z = Input(shape=(z_dim, ))

    # Conditioning label: the digit (an integer 0-9) that G should generate
    label = Input(shape=(1, ), dtype='int32')

    # Label embedding:
    # ----------------
    # Turns the label into a dense vector of size z_dim
    # Produces a 3D tensor of shape (batch_size, 1, z_dim)
    label_embedding = Embedding(num_classes, z_dim, input_length=1)(label)

    # Flatten the embedding 3D tensor into a 2D tensor of shape (batch_size, z_dim)
    label_embedding = Flatten()(label_embedding)

    # Element-wise product of the vector z and the label embedding
    joined_representation = Multiply()([z, label_embedding])

    generator = build_generator(z_dim)

    # Generate an image for the given label
    conditioned_img = generator(joined_representation)

    return Model([z, label], conditioned_img)

1. The Embedding layer turns the label y into a dense vector of size z_dim.

2. The Multiply layer combines the label embedding with the noise vector z into a joint representation: it multiplies the corresponding entries of the two equal-length vectors and outputs a single vector of the resulting products, as the quick shape check below illustrates.
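A minimal sketch (assuming the imports and the constants z_dim and num_classes defined above) that verifies the shapes produced by this embedding-and-multiply conditioning:

# Quick shape check of the label conditioning used by the generator
z_in = Input(shape=(z_dim, ))
label_in = Input(shape=(1, ), dtype='int32')

emb = Embedding(num_classes, z_dim, input_length=1)(label_in)  # (batch_size, 1, z_dim)
emb = Flatten()(emb)                                           # (batch_size, z_dim)
joined = Multiply()([z_in, emb])                               # (batch_size, z_dim)

demo = Model([z_in, label_in], joined)
out = demo.predict([np.random.normal(0, 1, (4, z_dim)),
                    np.arange(4).reshape(-1, 1)])
print(out.shape)  # expected: (4, 100)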

4.4 Discriminator

def build_discriminator(img_shape):

    model = Sequential()

    # Convolutional layer, from a 28*28*2 tensor to a 14*14*64 tensor
    model.add(
        Conv2D(64,
               kernel_size=3,
               strides=2,
               input_shape=(img_shape[0], img_shape[1], img_shape[2] + 1),
               padding='same'))

    # Leaky ReLU activation
    model.add(LeakyReLU(alpha=0.01))

    # Convolutional layer, from a 14*14*64 tensor to a 7*7*64 tensor
    model.add(
        Conv2D(64,
               kernel_size=3,
               strides=2,
               input_shape=img_shape,
               padding='same'))

    # Batch normalization
    model.add(BatchNormalization())

    # Leaky ReLU activation
    model.add(LeakyReLU(alpha=0.01))

    # Convolutional layer, from a 7*7*64 tensor to a 3*3*128 tensor
    model.add(
        Conv2D(128,
               kernel_size=3,
               strides=2,
               input_shape=img_shape,
               padding='same'))

    # Batch normalization
    model.add(BatchNormalization())

    # Leaky ReLU activation
    model.add(LeakyReLU(alpha=0.01))

    # Output layer with sigmoid activation
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))

    return model


def build_cgan_discriminator(img_shape):

    # Input image
    img = Input(shape=img_shape)

    # Label for the input image
    label = Input(shape=(1, ), dtype='int32')

    # Label embedding:
    # ----------------
    # Turns the label into a dense vector of size 28*28*1
    # Produces a 3D tensor of shape (batch_size, 1, 28*28*1)
    label_embedding = Embedding(num_classes,
                                np.prod(img_shape),
                                input_length=1)(label)

    # Flatten the embedding 3D tensor into a 2D tensor of shape (batch_size, 28*28*1)
    label_embedding = Flatten()(label_embedding)

    # Reshape the label embedding to the same dimensions as the input image
    label_embedding = Reshape(img_shape)(label_embedding)

    # Concatenate the image with its label embedding
    concatenated = Concatenate(axis=-1)([img, label_embedding])

    discriminator = build_discriminator(img_shape)

    # Classify the image-label pair
    classification = discriminator(concatenated)

    return Model([img, label], classification)
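A minimal, optional sanity check (an illustrative sketch, not part of the original listing): run a dummy image-label batch through the CGAN discriminator and confirm that it returns one probability per pair.

# Optional shape check: one probability per image-label pair
d_test = build_cgan_discriminator(img_shape)
dummy_imgs = np.random.uniform(-1, 1, (4, 28, 28, 1))
dummy_labels = np.arange(4).reshape(-1, 1)
print(d_test.predict([dummy_imgs, dummy_labels]).shape)  # expected: (4, 1)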


4.5 Building the full model

def build_cgan(generator, discriminator):

    # Random noise vector z
    z = Input(shape=(z_dim, ))

    # Image label
    label = Input(shape=(1, ))

    # Generate an image for the given label
    img = generator([z, label])

    classification = discriminator([img, label])

    # Combined Generator -> Discriminator model
    # G([z, label]) = x*
    # D(x*) = classification
    model = Model([z, label], classification)

    return model


# Build and compile the discriminator
# (if this raises an error, replace the optimizer with optimizer=Adam.Adam(),
#  matching the alternative import noted above)
discriminator = build_cgan_discriminator(img_shape)
discriminator.compile(loss='binary_crossentropy',
                      optimizer=Adam(),
                      metrics=['accuracy'])

# Build the generator
generator = build_cgan_generator(z_dim)

# Keep the discriminator's parameters fixed while training the generator
discriminator.trainable = False

# Build and compile the CGAN model, with the discriminator frozen, to train the generator
cgan = build_cgan(generator, discriminator)
cgan.compile(loss='binary_crossentropy', optimizer=Adam())
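As an optional check of the wiring, model.summary() will list the discriminator's weights under the non-trainable parameters of the combined cgan model, because cgan was built after discriminator.trainable was set to False; the standalone discriminator, compiled earlier, still updates those weights in its own train_on_batch() calls.

# Optional: inspect the combined model; the discriminator's parameters
# appear as non-trainable because cgan was built after discriminator.trainable = False
cgan.summary()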

4.6 Training

accuracies = []
losses = []


def train(iterations, batch_size, sample_interval):

    # Load the MNIST dataset
    (X_train, y_train), (_, _) = mnist.load_data()

    # Rescale grayscale pixel values from [0, 255] to [-1, 1]
    X_train = X_train / 127.5 - 1.
    X_train = np.expand_dims(X_train, axis=3)

    # Labels for real images: all 1
    real = np.ones((batch_size, 1))

    # Labels for fake images: all 0
    fake = np.zeros((batch_size, 1))

    for iteration in range(iterations):

        # -------------------------
        #  Train the discriminator
        # -------------------------

        # Get a random batch of real images and their labels
        idx = np.random.randint(0, X_train.shape[0], batch_size)
        imgs, labels = X_train[idx], y_train[idx]

        # Generate a batch of fake images
        z = np.random.normal(0, 1, (batch_size, z_dim))
        gen_imgs = generator.predict([z, labels])

        # Train the discriminator
        d_loss_real = discriminator.train_on_batch([imgs, labels], real)
        d_loss_fake = discriminator.train_on_batch([gen_imgs, labels], fake)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

        # ---------------------
        #  Train the generator
        # ---------------------

        # Generate a batch of noise vectors
        z = np.random.normal(0, 1, (batch_size, z_dim))

        # Get a batch of random labels
        labels = np.random.randint(0, num_classes, batch_size).reshape(-1, 1)

        # Train the generator
        g_loss = cgan.train_on_batch([z, labels], real)

        if (iteration + 1) % sample_interval == 0:

            # Output training progress
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" %
                  (iteration + 1, d_loss[0], 100 * d_loss[1], g_loss))

            # Save losses and accuracies so they can be plotted after training
            losses.append((d_loss[0], g_loss))
            accuracies.append(100 * d_loss[1])

            # Output a sample of generated images
            sample_images()

4.7 Displaying generated images

def sample_images(image_grid_rows=2, image_grid_columns=5):

    # Sample random noise
    z = np.random.normal(0, 1, (image_grid_rows * image_grid_columns, z_dim))

    # Get the image labels 0-9
    labels = np.arange(0, 10).reshape(-1, 1)

    # Generate images from the random noise
    gen_imgs = generator.predict([z, labels])

    # Rescale image pixel values to [0, 1]
    gen_imgs = 0.5 * gen_imgs + 0.5

    # Set up the image grid
    fig, axs = plt.subplots(image_grid_rows,
                            image_grid_columns,
                            figsize=(10, 4),
                            sharey=True,
                            sharex=True)

    cnt = 0
    for i in range(image_grid_rows):
        for j in range(image_grid_columns):
            # Output the image grid
            axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
            axs[i, j].axis('off')
            axs[i, j].set_title("Digit: %d" % labels[cnt])
            cnt += 1

4.8 Training the model

# Set the hyperparameters
iterations = 12000
batch_size = 32
sample_interval = 1000

# Train the model for the specified number of iterations
train(iterations, batch_size, sample_interval)
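The train() function records entries in the losses and accuracies lists; here is a minimal sketch (an addition, assuming train() has finished with the hyperparameters above) that plots the recorded training history:

# Plot the recorded discriminator/generator losses and discriminator accuracy
losses_arr = np.array(losses)
checkpoints = np.arange(sample_interval, iterations + 1, sample_interval)

plt.figure(figsize=(10, 4))
plt.plot(checkpoints, losses_arr[:, 0], label='Discriminator loss')
plt.plot(checkpoints, losses_arr[:, 1], label='Generator loss')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.legend()
plt.show()

plt.figure(figsize=(10, 4))
plt.plot(checkpoints, accuracies, label='Discriminator accuracy (%)')
plt.xlabel('Iteration')
plt.ylabel('Accuracy (%)')
plt.legend()
plt.show()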
