
Add verification scripts

liyan, 1 year ago
Commit 19615c9293
4 changed files with 589 additions and 0 deletions
  1. verify_cifar10_alexnet8.py (118 additions, 0 deletions)
  2. verify_cifar10_inception10.py (131 additions, 0 deletions)
  3. verify_cifar10_resnet18.py (135 additions, 0 deletions)
  4. verify_cifar10_vgg16.py (205 additions, 0 deletions)

+ 118 - 0
verify_cifar10_alexnet8.py

@@ -0,0 +1,118 @@
+import tensorflow as tf
+import os
+import numpy as np
+from matplotlib import pyplot as plt
+from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
+from tensorflow.keras import Model
+
+np.set_printoptions(threshold=np.inf)
+
+cifar10 = tf.keras.datasets.cifar10
+(x_train, y_train), (x_test, y_test) = cifar10.load_data()
+x_train, x_test = x_train / 255.0, x_test / 255.0
+
+
+class AlexNet8(Model):
+    def __init__(self):
+        super(AlexNet8, self).__init__()
+        self.c1 = Conv2D(filters=96, kernel_size=(3, 3))
+        self.b1 = BatchNormalization()
+        self.a1 = Activation('relu')
+        self.p1 = MaxPool2D(pool_size=(3, 3), strides=2)
+
+        self.c2 = Conv2D(filters=256, kernel_size=(3, 3))
+        self.b2 = BatchNormalization()
+        self.a2 = Activation('relu')
+        self.p2 = MaxPool2D(pool_size=(3, 3), strides=2)
+
+        self.c3 = Conv2D(filters=384, kernel_size=(3, 3), padding='same',
+                         activation='relu')
+
+        self.c4 = Conv2D(filters=384, kernel_size=(3, 3), padding='same',
+                         activation='relu')
+
+        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same',
+                         activation='relu')
+        self.p3 = MaxPool2D(pool_size=(3, 3), strides=2)
+
+        self.flatten = Flatten()
+        self.f1 = Dense(2048, activation='relu')
+        self.d1 = Dropout(0.5)
+        self.f2 = Dense(2048, activation='relu')
+        self.d2 = Dropout(0.5)
+        self.f3 = Dense(10, activation='softmax')
+
+    def call(self, x):
+        x = self.c1(x)
+        x = self.b1(x)
+        x = self.a1(x)
+        x = self.p1(x)
+
+        x = self.c2(x)
+        x = self.b2(x)
+        x = self.a2(x)
+        x = self.p2(x)
+
+        x = self.c3(x)
+
+        x = self.c4(x)
+
+        x = self.c5(x)
+        x = self.p3(x)
+
+        x = self.flatten(x)
+        x = self.f1(x)
+        x = self.d1(x)
+        x = self.f2(x)
+        x = self.d2(x)
+        y = self.f3(x)
+        return y
+
+
+model = AlexNet8()
+
+model.compile(optimizer='adam',
+              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
+              metrics=['sparse_categorical_accuracy'])
+
+checkpoint_save_path = "./checkpoint/AlexNet8.ckpt"
+if os.path.exists(checkpoint_save_path + '.index'):
+    print('-------------load the model-----------------')
+    model.load_weights(checkpoint_save_path)
+
+cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
+                                                 save_weights_only=True,
+                                                 save_best_only=True)
+
+history = model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1,
+                    callbacks=[cp_callback])
+model.summary()
+
+# print(model.trainable_variables)
+with open('./weights.txt', 'w') as file:
+    for v in model.trainable_variables:
+        file.write(str(v.name) + '\n')
+        file.write(str(v.shape) + '\n')
+        file.write(str(v.numpy()) + '\n')
+
+###############################################    show   ###############################################
+
+# Plot the accuracy and loss curves for the training and validation sets
+acc = history.history['sparse_categorical_accuracy']
+val_acc = history.history['val_sparse_categorical_accuracy']
+loss = history.history['loss']
+val_loss = history.history['val_loss']
+
+plt.subplot(1, 2, 1)
+plt.plot(acc, label='Training Accuracy')
+plt.plot(val_acc, label='Validation Accuracy')
+plt.title('Training and Validation Accuracy')
+plt.legend()
+
+plt.subplot(1, 2, 2)
+plt.plot(loss, label='Training Loss')
+plt.plot(val_loss, label='Validation Loss')
+plt.title('Training and Validation Loss')
+plt.legend()
+plt.show()
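
Note: the script above retrains for a few epochs every time it runs. To verify a saved checkpoint without further training, a minimal evaluation sketch like the following can be used (it assumes the AlexNet8 class and checkpoint path defined above):

# Minimal sketch: restore the checkpoint and score it on the CIFAR-10 test set.
import tensorflow as tf

(_, _), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_test = x_test / 255.0

model = AlexNet8()
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])
model.load_weights("./checkpoint/AlexNet8.ckpt")  # restore the best saved weights

loss, acc = model.evaluate(x_test, y_test, batch_size=32)
print('test accuracy:', acc)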

+ 131 - 0
verify_cifar10_inception10.py

@@ -0,0 +1,131 @@
+import tensorflow as tf
+import os
+import numpy as np
+from matplotlib import pyplot as plt
+from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense, \
+    GlobalAveragePooling2D
+from tensorflow.keras import Model
+
+np.set_printoptions(threshold=np.inf)
+
+cifar10 = tf.keras.datasets.cifar10
+(x_train, y_train), (x_test, y_test) = cifar10.load_data()
+x_train, x_test = x_train / 255.0, x_test / 255.0
+
+
+class ConvBNRelu(Model):
+    def __init__(self, ch, kernelsz=3, strides=1, padding='same'):
+        super(ConvBNRelu, self).__init__()
+        self.model = tf.keras.models.Sequential([
+            Conv2D(ch, kernelsz, strides=strides, padding=padding),
+            BatchNormalization(),
+            Activation('relu')
+        ])
+
+    def call(self, x):
+        # With training=False, BN normalizes with the moving statistics accumulated over the
+        # whole training set; with training=True it uses the current batch's mean and variance.
+        # For inference, training=False generally gives better results.
+        x = self.model(x, training=False)
+        return x
+
+
+class InceptionBlk(Model):
+    def __init__(self, ch, strides=1):
+        super(InceptionBlk, self).__init__()
+        self.ch = ch
+        self.strides = strides
+        self.c1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
+        self.c2_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
+        self.c2_2 = ConvBNRelu(ch, kernelsz=3, strides=1)
+        self.c3_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
+        self.c3_2 = ConvBNRelu(ch, kernelsz=5, strides=1)
+        self.p4_1 = MaxPool2D(3, strides=1, padding='same')
+        self.c4_2 = ConvBNRelu(ch, kernelsz=1, strides=strides)
+
+    def call(self, x):
+        x1 = self.c1(x)
+        x2_1 = self.c2_1(x)
+        x2_2 = self.c2_2(x2_1)
+        x3_1 = self.c3_1(x)
+        x3_2 = self.c3_2(x3_1)
+        x4_1 = self.p4_1(x)
+        x4_2 = self.c4_2(x4_1)
+        # concat along axis=channel
+        x = tf.concat([x1, x2_2, x3_2, x4_2], axis=3)
+        return x
+
+
+class Inception10(Model):
+    def __init__(self, num_blocks, num_classes, init_ch=16, **kwargs):
+        super(Inception10, self).__init__(**kwargs)
+        self.in_channels = init_ch
+        self.out_channels = init_ch
+        self.num_blocks = num_blocks
+        self.init_ch = init_ch
+        self.c1 = ConvBNRelu(init_ch)
+        self.blocks = tf.keras.models.Sequential()
+        for block_id in range(num_blocks):
+            for layer_id in range(2):
+                if layer_id == 0:
+                    block = InceptionBlk(self.out_channels, strides=2)
+                else:
+                    block = InceptionBlk(self.out_channels, strides=1)
+                self.blocks.add(block)
+            # double out_channels for the next group of blocks
+            self.out_channels *= 2
+        self.p1 = GlobalAveragePooling2D()
+        self.f1 = Dense(num_classes, activation='softmax')
+
+    def call(self, x):
+        x = self.c1(x)
+        x = self.blocks(x)
+        x = self.p1(x)
+        y = self.f1(x)
+        return y
+
+
+model = Inception10(num_blocks=2, num_classes=10)
+
+model.compile(optimizer='adam',
+              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
+              metrics=['sparse_categorical_accuracy'])
+
+checkpoint_save_path = "./checkpoint/Inception10.ckpt"
+if os.path.exists(checkpoint_save_path + '.index'):
+    print('-------------load the model-----------------')
+    model.load_weights(checkpoint_save_path)
+
+cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
+                                                 save_weights_only=True,
+                                                 save_best_only=True)
+
+history = model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1,
+                    callbacks=[cp_callback])
+model.summary()
+
+# print(model.trainable_variables)
+with open('./weights.txt', 'w') as file:
+    for v in model.trainable_variables:
+        file.write(str(v.name) + '\n')
+        file.write(str(v.shape) + '\n')
+        file.write(str(v.numpy()) + '\n')
+
+###############################################    show   ###############################################
+
+# Plot the accuracy and loss curves for the training and validation sets
+acc = history.history['sparse_categorical_accuracy']
+val_acc = history.history['val_sparse_categorical_accuracy']
+loss = history.history['loss']
+val_loss = history.history['val_loss']
+
+plt.subplot(1, 2, 1)
+plt.plot(acc, label='Training Accuracy')
+plt.plot(val_acc, label='Validation Accuracy')
+plt.title('Training and Validation Accuracy')
+plt.legend()
+
+plt.subplot(1, 2, 2)
+plt.plot(loss, label='Training Loss')
+plt.plot(val_loss, label='Validation Loss')
+plt.title('Training and Validation Loss')
+plt.legend()
+plt.show()
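
Each InceptionBlk runs four parallel branches with ch filters each and concatenates them along the channel axis, so its output always has 4 * ch channels. A quick shape check (assuming the classes defined above) makes this concrete:

# Shape check: four ch-filter branches concatenated on axis=3 give 4 * ch channels.
import tensorflow as tf

blk = InceptionBlk(ch=16, strides=1)
out = blk(tf.zeros((1, 32, 32, 3)))
print(out.shape)  # expected: (1, 32, 32, 64)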

+ 135 - 0
verify_cifar10_resnet18.py

@@ -0,0 +1,135 @@
+import tensorflow as tf
+import os
+import numpy as np
+from matplotlib import pyplot as plt
+from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
+from tensorflow.keras import Model
+
+np.set_printoptions(threshold=np.inf)
+
+cifar10 = tf.keras.datasets.cifar10
+(x_train, y_train), (x_test, y_test) = cifar10.load_data()
+x_train, x_test = x_train / 255.0, x_test / 255.0
+
+
+class ResnetBlock(Model):
+
+    def __init__(self, filters, strides=1, residual_path=False):
+        super(ResnetBlock, self).__init__()
+        self.filters = filters
+        self.strides = strides
+        self.residual_path = residual_path
+
+        self.c1 = Conv2D(filters, (3, 3), strides=strides, padding='same', use_bias=False)
+        self.b1 = BatchNormalization()
+        self.a1 = Activation('relu')
+
+        self.c2 = Conv2D(filters, (3, 3), strides=1, padding='same', use_bias=False)
+        self.b2 = BatchNormalization()
+
+        # When residual_path is True, downsample the input with a 1x1 convolution so that
+        # x matches the shape of F(x) and the two can be added.
+        if residual_path:
+            self.down_c1 = Conv2D(filters, (1, 1), strides=strides, padding='same', use_bias=False)
+            self.down_b1 = BatchNormalization()
+
+        self.a2 = Activation('relu')
+
+    def call(self, inputs):
+        residual = inputs  # the identity branch: residual = x
+        # pass the input through conv, BN and activation layers to compute F(x)
+        x = self.c1(inputs)
+        x = self.b1(x)
+        x = self.a1(x)
+
+        x = self.c2(x)
+        y = self.b2(x)
+
+        if self.residual_path:
+            residual = self.down_c1(inputs)
+            residual = self.down_b1(residual)
+
+        out = self.a2(y + residual)  # the output is F(x) + x (or F(x) + Wx), passed through the activation
+        return out
+
+
+class ResNet18(Model):
+
+    def __init__(self, block_list, initial_filters=64):  # block_list gives the number of residual blocks in each stage
+        super(ResNet18, self).__init__()
+        self.num_blocks = len(block_list)  # number of stages
+        self.block_list = block_list
+        self.out_filters = initial_filters
+        self.c1 = Conv2D(self.out_filters, (3, 3), strides=1, padding='same', use_bias=False)
+        self.b1 = BatchNormalization()
+        self.a1 = Activation('relu')
+        self.blocks = tf.keras.models.Sequential()
+        # build the ResNet architecture
+        for block_id in range(len(block_list)):  # stage index
+            for layer_id in range(block_list[block_id]):  # block index within the stage
+
+                if block_id != 0 and layer_id == 0:  # downsample the input of every stage except the first
+                    block = ResnetBlock(self.out_filters, strides=2, residual_path=True)
+                else:
+                    block = ResnetBlock(self.out_filters, residual_path=False)
+                self.blocks.add(block)  # append the finished block to the network
+            self.out_filters *= 2  # the next stage uses twice as many filters as the previous one
+        self.p1 = tf.keras.layers.GlobalAveragePooling2D()
+        self.f1 = tf.keras.layers.Dense(10, activation='softmax', kernel_regularizer=tf.keras.regularizers.l2())
+
+    def call(self, inputs):
+        x = self.c1(inputs)
+        x = self.b1(x)
+        x = self.a1(x)
+        x = self.blocks(x)
+        x = self.p1(x)
+        y = self.f1(x)
+        return y
+
+
+model = ResNet18([2, 2, 2, 2])
+
+model.compile(optimizer='adam',
+              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
+              metrics=['sparse_categorical_accuracy'])
+
+checkpoint_save_path = "./checkpoint/ResNet18.ckpt"
+if os.path.exists(checkpoint_save_path + '.index'):
+    print('-------------load the model-----------------')
+    model.load_weights(checkpoint_save_path)
+
+cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
+                                                 save_weights_only=True,
+                                                 save_best_only=True)
+
+history = model.fit(x_train, y_train, batch_size=32, epochs=50, validation_data=(x_test, y_test), validation_freq=1,
+                    callbacks=[cp_callback])
+model.summary()
+
+# print(model.trainable_variables)
+with open('./weights.txt', 'w') as file:
+    for v in model.trainable_variables:
+        file.write(str(v.name) + '\n')
+        file.write(str(v.shape) + '\n')
+        file.write(str(v.numpy()) + '\n')
+
+###############################################    show   ###############################################
+
+# Plot the accuracy and loss curves for the training and validation sets
+acc = history.history['sparse_categorical_accuracy']
+val_acc = history.history['val_sparse_categorical_accuracy']
+loss = history.history['loss']
+val_loss = history.history['val_loss']
+
+plt.subplot(1, 2, 1)
+plt.plot(acc, label='Training Accuracy')
+plt.plot(val_acc, label='Validation Accuracy')
+plt.title('Training and Validation Accuracy')
+plt.legend()
+
+plt.subplot(1, 2, 2)
+plt.plot(loss, label='Training Loss')
+plt.plot(val_loss, label='Validation Loss')
+plt.title('Training and Validation Loss')
+plt.legend()
+plt.show()
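
ResNet18([2, 2, 2, 2]) stacks four stages of two residual blocks; with two convolutions per block that is 16 convolutional layers, plus the stem convolution and the final Dense layer, which accounts for the 18 weight layers in the name. The sketch below replays the construction loop to show where downsampling happens and how the filter count grows:

# Sketch: replay the loop from ResNet18.__init__ and print each block's configuration.
block_list, filters = [2, 2, 2, 2], 64
for block_id in range(len(block_list)):
    for layer_id in range(block_list[block_id]):
        strides = 2 if block_id != 0 and layer_id == 0 else 1
        print('stage', block_id, 'block', layer_id, ':', filters, 'filters, strides =', strides)
    filters *= 2
# On 32x32 CIFAR-10 inputs the feature maps shrink 32 -> 16 -> 8 -> 4 across the stages.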

+ 205 - 0
verify_cifar10_vgg16.py

@@ -0,0 +1,205 @@
+import tensorflow as tf
+import os
+import numpy as np
+from matplotlib import pyplot as plt
+from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
+from tensorflow.keras import Model
+
+from class5.CIFAR10_CNN.watermark_regularizers import WatermarkRegularizer
+
+np.set_printoptions(threshold=np.inf)
+
+cifar10 = tf.keras.datasets.cifar10
+(x_train, y_train), (x_test, y_test) = cifar10.load_data()
+x_train, x_test = x_train / 255.0, x_test / 255.0
+
+# Script parameters
+target_blk_id = 0  # index of the layer to watermark
+embed_dim = 256  # watermark length in bits
+scale = 0.01  # weight of the watermark regularization term
+wtype = 'random'  # watermark projection type
+randseed = 'none'
+
+class VGG16(Model):
+    def __init__(self):
+        super(VGG16, self).__init__()
+        self.c1 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')  # convolutional layer
+        self.b1 = BatchNormalization()  # BN layer
+        self.a1 = Activation('relu')  # activation layer
+        self.c2 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')
+        self.b2 = BatchNormalization()
+        self.a2 = Activation('relu')
+        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
+        self.d1 = Dropout(0.2)  # dropout layer
+
+        self.c3 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')
+        self.b3 = BatchNormalization()
+        self.a3 = Activation('relu')
+        self.c4 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')
+        self.b4 = BatchNormalization()
+        self.a4 = Activation('relu')
+        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
+        self.d2 = Dropout(0.2)
+
+        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
+        self.b5 = BatchNormalization()
+        self.a5 = Activation('relu')
+        self.c6 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
+        self.b6 = BatchNormalization()
+        self.a6 = Activation('relu')
+        self.c7 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
+        self.b7 = BatchNormalization()
+        self.a7 = Activation('relu')
+        self.p3 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
+        self.d3 = Dropout(0.2)
+
+        self.c8 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
+        self.b8 = BatchNormalization()
+        self.a8 = Activation('relu')
+        self.c9 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
+        self.b9 = BatchNormalization()
+        self.a9 = Activation('relu')
+        self.c10 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
+        self.b10 = BatchNormalization()
+        self.a10 = Activation('relu')
+        self.p4 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
+        self.d4 = Dropout(0.2)
+
+        self.c11 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
+        self.b11 = BatchNormalization()
+        self.a11 = Activation('relu')
+        self.c12 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
+        self.b12 = BatchNormalization()
+        self.a12 = Activation('relu')
+        self.c13 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
+        self.b13 = BatchNormalization()
+        self.a13 = Activation('relu')
+        self.p5 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
+        self.d5 = Dropout(0.2)
+
+        self.flatten = Flatten()
+        self.f1 = Dense(512, activation='relu')
+        self.d6 = Dropout(0.2)
+        self.f2 = Dense(512, activation='relu')
+        self.d7 = Dropout(0.2)
+        self.f3 = Dense(10, activation='softmax')
+
+    def call(self, x):
+        x = self.c1(x)
+        x = self.b1(x)
+        x = self.a1(x)
+        x = self.c2(x)
+        x = self.b2(x)
+        x = self.a2(x)
+        x = self.p1(x)
+        x = self.d1(x)
+
+        x = self.c3(x)
+        x = self.b3(x)
+        x = self.a3(x)
+        x = self.c4(x)
+        x = self.b4(x)
+        x = self.a4(x)
+        x = self.p2(x)
+        x = self.d2(x)
+
+        x = self.c5(x)
+        x = self.b5(x)
+        x = self.a5(x)
+        x = self.c6(x)
+        x = self.b6(x)
+        x = self.a6(x)
+        x = self.c7(x)
+        x = self.b7(x)
+        x = self.a7(x)
+        x = self.p3(x)
+        x = self.d3(x)
+
+        x = self.c8(x)
+        x = self.b8(x)
+        x = self.a8(x)
+        x = self.c9(x)
+        x = self.b9(x)
+        x = self.a9(x)
+        x = self.c10(x)
+        x = self.b10(x)
+        x = self.a10(x)
+        x = self.p4(x)
+        x = self.d4(x)
+
+        x = self.c11(x)
+        x = self.b11(x)
+        x = self.a11(x)
+        x = self.c12(x)
+        x = self.b12(x)
+        x = self.a12(x)
+        x = self.c13(x)
+        x = self.b13(x)
+        x = self.a13(x)
+        x = self.p5(x)
+        x = self.d5(x)
+
+        x = self.flatten(x)
+        x = self.f1(x)
+        x = self.d6(x)
+        x = self.f2(x)
+        x = self.d7(x)
+        y = self.f3(x)
+        return y
+
+
+model = VGG16()
+
+# Initialize the watermark embedding
+b = np.ones((1, embed_dim))  # the watermark bits to embed
+wmark_regularizer = WatermarkRegularizer(scale, b, wtype=wtype, randseed=randseed)
+
+# Attach the regularizer to the target layer's kernel; set before the model is
+# built so it is picked up when the layer creates its weights
+selected_layer = model.get_layer(index=target_blk_id)
+selected_layer.kernel_regularizer = wmark_regularizer
+
+model.compile(optimizer='adam',
+              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
+              metrics=['sparse_categorical_accuracy'])
+
+checkpoint_save_path = "./checkpoint/VGG16.ckpt"
+if os.path.exists(checkpoint_save_path + '.index'):
+    print('-------------load the model-----------------')
+    model.load_weights(checkpoint_save_path)
+
+cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
+                                                 save_weights_only=True,
+                                                 save_best_only=True)
+
+history = model.fit(x_train, y_train, batch_size=32, epochs=20, validation_data=(x_test, y_test), validation_freq=1,
+                    callbacks=[cp_callback])
+model.summary()
+
+# print(model.trainable_variables)
+with open('./weights.txt', 'w') as file:
+    for v in model.trainable_variables:
+        file.write(str(v.name) + '\n')
+        file.write(str(v.shape) + '\n')
+        file.write(str(v.numpy()) + '\n')
+
+###############################################    show   ###############################################
+
+# Plot the accuracy and loss curves for the training and validation sets
+acc = history.history['sparse_categorical_accuracy']
+val_acc = history.history['val_sparse_categorical_accuracy']
+loss = history.history['loss']
+val_loss = history.history['val_loss']
+
+plt.subplot(1, 2, 1)
+plt.plot(acc, label='Training Accuracy')
+plt.plot(val_acc, label='Validation Accuracy')
+plt.title('Training and Validation Accuracy')
+plt.legend()
+
+plt.subplot(1, 2, 2)
+plt.plot(loss, label='Training Loss')
+plt.plot(val_loss, label='Validation Loss')
+plt.title('Training and Validation Loss')
+plt.legend()
+plt.show()
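
The imported WatermarkRegularizer is not part of this commit, so its internals are an assumption here. Regularizer-based watermark embedding in the style of Uchida et al. typically projects the averaged, flattened kernel through a fixed random matrix and penalizes the binary cross-entropy against the watermark bits b. A hypothetical minimal sketch with the same constructor signature as the call above might look like this:

# Hypothetical sketch only; the real class5.CIFAR10_CNN.watermark_regularizers
# implementation may differ.
import numpy as np
import tensorflow as tf

class WatermarkRegularizerSketch(tf.keras.regularizers.Regularizer):
    def __init__(self, scale, b, wtype='random', randseed='none'):
        self.scale = scale                    # weight of the penalty term
        self.b = tf.constant(b, tf.float32)   # watermark bits, shape (1, embed_dim)
        self.wtype = wtype
        self.seed = None if randseed == 'none' else int(randseed)
        self.X = None                         # fixed projection matrix, built lazily

    def __call__(self, weights):
        # average the conv kernel over its output-filter axis and flatten to a row vector
        w = tf.reshape(tf.reduce_mean(weights, axis=3), (1, -1))
        if self.X is None:
            rng = np.random.RandomState(self.seed)
            self.X = tf.constant(rng.randn(int(w.shape[1]), int(self.b.shape[1])), tf.float32)
        # binary cross-entropy between sigmoid(w @ X) and the watermark bits
        p = tf.sigmoid(tf.matmul(w, self.X))
        return self.scale * tf.reduce_sum(tf.keras.losses.binary_crossentropy(self.b, p))

With b = np.ones((1, embed_dim)) as in the script, the penalty pushes all 256 projections toward 1, which is what a later detection step would check against.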