import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Dropout, Conv2D, Flatten, Reshape, MaxPooling2D, Concatenate, Input
from tensorflow.keras.optimizers.legacy import Adam
from tensorflow.keras.callbacks import EarlyStopping
# 'args', 'train_dataset', 'validation_dataset', 'input_length' and 'classes'
# are provided by the surrounding training harness.
EPOCHS = args.epochs or 50
LEARNING_RATE = args.learning_rate or 0.0005
# If True, non-deterministic functions (e.g. shuffling batches) are not used.
# This is False by default.
ENSURE_DETERMINISM = args.ensure_determinism
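# The flag above only disables shuffling below. As an illustrative extension
# (an assumption, not part of the generated script), TensorFlow can also be
# seeded globally and op-level determinism enabled (available in TF >= 2.8):
if ENSURE_DETERMINISM:
    tf.keras.utils.set_random_seed(1)
    tf.config.experimental.enable_op_determinism()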
# this controls the batch size, or you can manipulate the tf.data.Dataset objects yourself
BATCH_SIZE = args.batch_size or 32
if not ENSURE_DETERMINISM:
    train_dataset = train_dataset.shuffle(buffer_size=BATCH_SIZE*4)
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=False)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=False)
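# For reference, 'train_dataset' and 'validation_dataset' are tf.data.Dataset
# objects built by the harness. Equivalent (unbatched) datasets could be
# created from NumPy arrays like this (a sketch; X_*/Y_* are hypothetical):
# train_dataset = tf.data.Dataset.from_tensor_slices((X_train, Y_train))
# validation_dataset = tf.data.Dataset.from_tensor_slices((X_val, Y_val))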
# Interpret the flat input vector as a (rows x columns) image with one channel
channels = 1
columns = 50
rows = input_length // (columns * channels)
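# Worked example (assuming input_length = 5000): rows = 5000 // (50 * 1) = 100,
# so each half of the input (2500 values) reshapes to (50, 50, 1) below.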
# Input layer
input_layer = Input(shape=(input_length,))
# Split the input into two halves
half_length = input_length // 2
# First half
first_half = Reshape((rows // 2, columns, channels))(input_layer[:, :half_length])
branch1 = Conv2D(8, kernel_size=3, kernel_constraint=tf.keras.constraints.MaxNorm(1), padding='same', activation='relu')(first_half)
branch1 = MaxPooling2D(pool_size=2, strides=2, padding='same')(branch1)
branch1 = Dropout(0.5)(branch1)
branch1 = Conv2D(16, kernel_size=3, kernel_constraint=tf.keras.constraints.MaxNorm(1), padding='same', activation='relu')(branch1)
branch1 = MaxPooling2D(pool_size=2, strides=2, padding='same')(branch1)
branch1 = Dropout(0.5)(branch1)
branch1 = Flatten()(branch1)
# Second half
second_half = Reshape((rows // 2, columns, channels))(input_layer[:, half_length:])
branch2 = Conv2D(8, kernel_size=3, kernel_constraint=tf.keras.constraints.MaxNorm(1), padding='same', activation='relu')(second_half)
branch2 = MaxPooling2D(pool_size=2, strides=2, padding='same')(branch2)
branch2 = Dropout(0.5)(branch2)
branch2 = Conv2D(16, kernel_size=3, kernel_constraint=tf.keras.constraints.MaxNorm(1), padding='same', activation='relu')(branch2)
branch2 = MaxPooling2D(pool_size=2, strides=2, padding='same')(branch2)
branch2 = Dropout(0.5)(branch2)
branch2 = Flatten()(branch2)
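# The two branches above are structurally identical; a helper like this
# (a refactoring sketch, not used below) would remove the duplication, e.g.
# branch1 = make_branch(first_half); branch2 = make_branch(second_half):
def make_branch(x):
    for filters in (8, 16):
        x = Conv2D(filters, kernel_size=3,
                   kernel_constraint=tf.keras.constraints.MaxNorm(1),
                   padding='same', activation='relu')(x)
        x = MaxPooling2D(pool_size=2, strides=2, padding='same')(x)
        x = Dropout(0.5)(x)
    return Flatten()(x)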
# Concatenate the outputs of both branches
merged = Concatenate()([branch1, branch2])
# Final dense layer
output_layer = Dense(classes, name='y_pred', activation='softmax')(merged)
# Create model
model = Model(inputs=input_layer, outputs=output_layer)
# this controls the learning rate
opt = Adam(learning_rate=LEARNING_RATE, beta_1=0.9, beta_2=0.999)
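# A fixed learning rate is used here; a decaying schedule is a common
# alternative (a sketch with illustrative decay values):
# lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
#     LEARNING_RATE, decay_steps=1000, decay_rate=0.96)
# opt = Adam(learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999)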
# 'callbacks', 'BatchLoggerCallback' and 'train_sample_count' are also supplied by the harness
callbacks.append(BatchLoggerCallback(BATCH_SIZE, train_sample_count, epochs=EPOCHS, ensure_determinism=ENSURE_DETERMINISM))
# Stop early if the monitored metric (val_loss by default) fails to improve for 3 consecutive epochs
callbacks.append(EarlyStopping(patience=3))
# train the neural network
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.fit(train_dataset, epochs=EPOCHS, validation_data=validation_dataset, verbose=2, callbacks=callbacks)
# Use this flag to disable per-channel quantization for a model.
# This can reduce RAM usage for convolutional models, but may have
# an impact on accuracy.
disable_per_channel_quantization = False
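# The flag above is consumed by the deployment pipeline. For reference, int8
# post-training quantization is typically applied with the TFLite converter
# along these lines (a sketch; 'convert_int8' and 'representative_data' are
# hypothetical names):
def convert_int8(model, representative_data):
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    def representative_dataset():
        for sample in representative_data:
            # The converter expects a list of input tensors per calibration sample
            yield [tf.expand_dims(tf.cast(sample, tf.float32), 0)]
    converter.representative_dataset = representative_dataset
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    return converter.convert()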