diff --git a/Decision/HLSwitch/HLSwitch_eval.py b/Decision/HLSwitch/HLSwitch_eval.py
new file mode 100644
index 0000000..422c70e
--- /dev/null
+++ b/Decision/HLSwitch/HLSwitch_eval.py
@@ -0,0 +1,103 @@
+# HLSwitch evaluation script
+import tensorflow as tf
+import numpy as np
+import time
+import cv2
+from keras.models import load_model
+import multiprocessing
+
+
+# Let TensorFlow allocate GPU memory on demand instead of grabbing it all
+config = tf.ConfigProto()
+config.gpu_options.allow_growth = True
+session = tf.Session(config=config)
+
+model_dir = './models/final_model_new_4.hdf5'
+
+class imageProcess():
+    def preprocess(self, img):
+        #img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+        img = img.astype(np.float32)
+        img /= 255.0
+        return img
+
+    def postprocess(self, img):
+        pass
+
+
+class inference():
+    def __init__(self):
+        self.model = load_model(model_dir)
+        self.switchValue = 0.5  # sigmoid threshold between GPS and VISION
+        self.imageSize = (200, 200)
+        self.improc = imageProcess()
+        self.showInferenceTime = True
+        self.ans_dict = {0: 'GPS', 1: 'VISION'}
+
+    def run(self, img):
+        start = time.time()
+        tempStr = ""
+        tempImage = self.improc.preprocess(img)
+        result = self.model.predict(tempImage.reshape(1, self.imageSize[0], self.imageSize[1], 1))
+        prediction_result_prob = result * 100
+        end = time.time()
+        if self.showInferenceTime:
+            print("Inference Time(s) :", end - start)
+        if result[0] > self.switchValue:
+            tempStr = "VISION"
+        else:
+            tempStr = "GPS"
+
+        return [tempStr, prediction_result_prob]
+
+
+class subNode():
+    def __init__(self):
+        #rospy.Subscriber('/camera/image_color', Image, self.imageCb)
+        runTh = multiprocessing.Process(target=self.run)
+        runTh.start()
+
+    def run(self):
+        pass
+
+    def imageCb(self, msg):
+        self.trafficIdx = msg.data
+        print(self.trafficIdx)
+
+class pubNode():
+    def __init__(self):
+        #self.switchPub = rospy.Publisher(rootname+pubAccel, Int16, queue_size = 1)
+        pass
+    def run(self):
+        pass
+
+class main():
+    def run(self):
+        hlSwitch = inference()
+        video_dir = 'test.webm'
+        cap = cv2.VideoCapture(video_dir)
+        while True:
+            ret, frame = cap.read()
+            if not ret:
+                break  # stop at end of video instead of spinning forever
+            tempImage = cv2.resize(frame, (200, 200))
+            tempImage = cv2.cvtColor(tempImage, cv2.COLOR_BGR2GRAY)
+            location = (0, 40)
+            font = cv2.FONT_HERSHEY_SIMPLEX
+            fontScale = 0.9
+            strn = str(hlSwitch.run(tempImage))
+            cv2.putText(frame, strn, location, font, fontScale, (0, 100, 255), 2)
+            cv2.imshow('asd', frame)
+            cv2.waitKey(1)
+        cap.release()
+        cv2.destroyAllWindows()
+
+
+if __name__ == "__main__":
+    run = main()
+    run.run()
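+
+# --- Hedged usage sketch (illustrative addition, not part of the pipeline) ---
+# Minimal single-image check of inference.run; 'sample.jpg' is a hypothetical
+# path, and the 200x200 grayscale preprocessing mirrors main.run above.
+def demo_single_image(path='sample.jpg'):
+    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)  # assumes the file exists
+    img = cv2.resize(img, (200, 200))
+    label, prob = inference().run(img)
+    print(label, prob)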
diff --git a/Decision/HLSwitch/dataset_raw/annotTool.py b/Decision/HLSwitch/dataset_raw/annotTool.py
new file mode 100644
index 0000000..3cf0e03
--- /dev/null
+++ b/Decision/HLSwitch/dataset_raw/annotTool.py
@@ -0,0 +1,73 @@
+# Dataset Gen
+
+import cv2
+import os
+
+
+videoName = str(input("Video Name : (with Extension *.mp4) "))
+print('./video/' + videoName)
+cap = cv2.VideoCapture('./video/' + videoName)
+if not cap.isOpened():
+    # cv2.VideoCapture does not raise on a bad path, so check isOpened()
+    print("Error while reading video file")
+
+idx = 0
+while True:
+    ret, frame = cap.read()
+    print("Arrow Key LEFT : GPS\nArrow Key RIGHT : VISION\nArrow Key UP : Save all and Exit")
+    if ret:
+        cv2.imshow('SCREEN', frame)
+        ket = cv2.waitKey(0)
+        print(ket)
+        if ket == 81:
+            # Left arrow (Linux key code)
+            dir = './GPS/'
+            print("GPS")
+            cv2.imwrite(dir + videoName + '_' + str(idx) + '.jpg',
+                        cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (640, 360)))
+        elif ket == 83:
+            # Right arrow (Linux key code)
+            dir = './VISION/'
+            cv2.imwrite(dir + videoName + '_' + str(idx) + '.jpg',
+                        cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (640, 360)))
+            print("VISION")
+        elif ket == 82:
+            # Up arrow (Linux key code)
+            print("SAVE")
+            break
+    else:
+        break
+    idx += 1
+
+cap.release()
+cv2.destroyAllWindows()
+
+
+# Second pass with Windows arrow-key codes (cv2.waitKey reports different
+# values there). The capture has to be reopened after release() above, and
+# the save step mirrors the Linux pass.
+cap = cv2.VideoCapture('./video/' + videoName)
+idx = 0
+while True:
+    ret, frame = cap.read()
+    print("Arrow Key LEFT : GPS\nArrow Key RIGHT : VISION\nArrow Key UP : Save all and Exit")
+    if ret:
+        cv2.imshow('SCREEN', frame)
+        ket = cv2.waitKey(0)
+
+        if ket == 2424832:
+            # Left arrow (Windows key code)
+            dir = './GPS/'
+            print("GPS")
+            cv2.imwrite(dir + videoName + '_' + str(idx) + '.jpg',
+                        cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (640, 360)))
+        elif ket == 2555904:
+            # Right arrow (Windows key code)
+            dir = './VISION/'
+            print("VISION")
+            cv2.imwrite(dir + videoName + '_' + str(idx) + '.jpg',
+                        cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (640, 360)))
+        elif ket == 2490368:
+            # Up arrow (Windows key code)
+            print("SAVE")
+            break
+    else:
+        break
+    idx += 1
+
+cap.release()
+cv2.destroyAllWindows()
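+
+# --- Hedged helper sketch (illustrative addition) ---
+# Quick class-balance report over the folders populated above.
+for d in ('./GPS/', './VISION/'):
+    if os.path.isdir(d):
+        print(d, len([f for f in os.listdir(d) if f.endswith('.jpg')]), 'frames')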
diff --git a/Decision/HLSwitch/img2Pixel.py b/Decision/HLSwitch/img2Pixel.py
new file mode 100644
index 0000000..4c33241
--- /dev/null
+++ b/Decision/HLSwitch/img2Pixel.py
@@ -0,0 +1,80 @@
+# Image normalization script
+# Grayscale, resize, normalize (/255.0), then stack everything into .npy files
+# Drop this into the parent directory of the folders holding the sorted images and run it...
+
+# script for preparing datasets, loading fer data and generating scaled images
+import numpy as np
+import cv2
+import sys
+import os
+
+emotions = {0: 'GPS', 1: 'VISION'}
+
+def _cls():
+    os.system("cls" if os.name == "nt" else "clear")
+
+def run(data, label, desired_size):
+    data0 = data[:]
+    label0 = label[:]
+    for root, dirs, files in os.walk(os.getcwd(), topdown=False):
+        for name in files:
+            try:
+                filename = os.path.join(root, name)
+                print(filename)
+                cvim = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
+                grayCv = cv2.resize(cvim, desired_size)
+                print(grayCv.shape)
+                pdata = np.array(grayCv.reshape(-1)).astype('float32')
+                pdata = [np.divide(d, 255.0) for d in pdata]
+                try:
+                    data = np.concatenate((data, [pdata]))
+                except Exception as ex:
+                    # first image: data is still the empty placeholder list
+                    print('first start', ex)
+                    data = np.array([pdata])
+
+                emotions_k = list(emotions.keys())
+                for key in emotions_k:
+                    if emotions[key] in filename:
+                        label = np.concatenate((label, [key]))
+                        _cls()
+                        print(f'add img2pix {name}:{emotions[key]}')
+
+            except Exception as rex:
+                print('Image OpenError!', rex)
+    print(data.shape)
+    print(f"All done!\n{len(data)} files are converted and added")
+    name = "Scaled_" + str(len(data)) + ".npy"
+    name_lab = "labeld_" + str(len(data)) + ".npy"
+    np.save(name, data)
+    np.save(name_lab, label)
+
+
+# load previous npy files if they exist
+try:
+    var1, var2, desired_size = '', '', (320, 180)
+    var1 = sys.argv[2]  # existing pixel npy (intended merge target)
+    var2 = sys.argv[3]  # existing label npy (intended merge target)
+    # NOTE: var1/var2 are accepted on the command line, but the actual
+    # load-and-merge step is not implemented; run() always starts empty.
+    data = [[]]
+    data0 = data[:]
+    label = []
+    label0 = label[:]
+    desired_size = eval(sys.argv[1])
+
+    run(data, label, desired_size)
+except Exception as ex:
+    tuto = '''
+    image2pixel by shinkansan
+    =Manual=
+    e.g) python img2Pixel.py '(48, 48)' data.npy label.npy
+    argv[1] = img resize..tuple
+    argv[2] = merging existing data npy file
+    argv[3] = merging existing label npy file
+    '''
+    print(tuto)
+    print(ex)
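+
+# --- Hedged sanity-check sketch (illustrative addition) ---
+# Reload a saved pair and confirm the shapes line up; the default names below
+# assume a 6-image run, matching the ./data files loaded by train.py.
+def check_pair(pix_path='Scaled_6.npy', lab_path='labeld_6.npy'):
+    pix = np.load(pix_path)
+    lab = np.load(lab_path)
+    print(pix.shape, lab.shape)  # (N, W*H) and (N,)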
diff --git a/Decision/HLSwitch/model/__pycache__/netLoader.cpython-36.pyc b/Decision/HLSwitch/model/__pycache__/netLoader.cpython-36.pyc
new file mode 100644
index 0000000..d1d0e8e
Binary files /dev/null and b/Decision/HLSwitch/model/__pycache__/netLoader.cpython-36.pyc differ
diff --git a/Decision/HLSwitch/model/mcdropout_ferVGG.py b/Decision/HLSwitch/model/mcdropout_ferVGG.py
new file mode 100644
index 0000000..5f15e19
--- /dev/null
+++ b/Decision/HLSwitch/model/mcdropout_ferVGG.py
@@ -0,0 +1,80 @@
+'''
+VGG with MC dropout in the modular Keras functional API
+'''
+
+import keras
+from keras.layers import Dense, Conv2D, Dropout, Input, Activation, Flatten
+from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
+from keras.optimizers import Adam
+from keras.regularizers import l2
+from keras.losses import categorical_crossentropy
+
+
+def predictionMC():
+    # MC-dropout prediction helper; not implemented yet (a sketch is appended
+    # at the bottom of this file)
+    pass
+
+
+def fer_vgg(input_shape=(48, 48, 1), input_classes=7):
+    inputs = keras.Input(shape=input_shape)
+    # Conv Block 1
+    x = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
+    x = Dropout(0.25)(x, training=True)
+    # bug fix: this conv must consume x, not inputs, otherwise the first
+    # conv + dropout above are silently dropped from the graph
+    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
+    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
+    x = Dropout(0.25)(x, training=True)
+
+    # Conv Block 2
+    x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
+    x = Dropout(0.25)(x, training=True)
+    x = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(0.001))(x)
+    x = MaxPooling2D((2, 2), strides=(2, 2))(x)
+    x = Dropout(0.25)(x, training=True)
+
+    # Conv Block 3
+    x = Conv2D(256, (3, 3), padding='same', activation='relu')(x)
+    x = Dropout(0.25)(x, training=True)
+    x = Conv2D(256, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(0.001))(x)
+    x = Dropout(0.25)(x, training=True)
+    x = Conv2D(256, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(0.001))(x)
+    x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
+    x = Dropout(0.25)(x, training=True)
+
+    # Conv Block 4
+    x = Conv2D(512, (3, 3), padding='same', activation='relu')(x)
+    x = Dropout(0.25)(x, training=True)
+    x = Conv2D(512, (3, 3), padding='same', activation='relu')(x)
+    x = Dropout(0.25)(x, training=True)
+    x = Conv2D(512, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(0.001))(x)
+    x = Dropout(0.25)(x, training=True)
+    x = Conv2D(512, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(0.001))(x)
+    x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
+    x = Dropout(0.25)(x, training=True)
+
+    # Conv Block 5
+    x = Conv2D(512, (3, 3), padding='same', activation='relu')(x)
+    x = Dropout(0.25)(x, training=True)
+    x = Conv2D(512, (3, 3), padding='same', activation='relu')(x)
+    x = Dropout(0.25)(x, training=True)
+    x = Conv2D(512, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(0.001))(x)
+    x = Dropout(0.25)(x, training=True)
+    x = Conv2D(512, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(0.001))(x)
+    x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
+    x = Dropout(0.25)(x, training=True)
+
+    # FC Layers
+    x = Flatten()(x)
+    #x = Dense(4096, activation='relu')(x)
+    #x = Dropout(0.25)(x, training=True)
+    x = Dense(512, activation='relu')(x)
+    x = Dropout(0.25)(x, training=True)
+    x = Dense(256, activation='relu')(x)
+    x = Dropout(0.25)(x, training=True)  # keep this stochastic too, for consistent MC sampling
+    output = Dense(input_classes, activation='softmax')(x)  # was hardcoded to 7
+
+    model = keras.Model(inputs, output)
+    return model
+
+if __name__ == "__main__":
+    model = fer_vgg()
+    model.summary()
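+
+# --- Hedged sketch of the predictionMC stub above (illustrative addition) ---
+# Because every Dropout layer is built with training=True, each forward pass
+# stays stochastic at inference time; averaging T passes approximates the
+# MC-dropout predictive mean and the std gives an uncertainty estimate.
+# T=20 is an assumed sample count.
+def mc_predict(model, x, T=20):
+    import numpy as np
+    preds = np.stack([model.predict(x) for _ in range(T)], axis=0)
+    return preds.mean(axis=0), preds.std(axis=0)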
diff --git a/Decision/HLSwitch/model/netLoader.py b/Decision/HLSwitch/model/netLoader.py
new file mode 100644
index 0000000..106cfc1
--- /dev/null
+++ b/Decision/HLSwitch/model/netLoader.py
@@ -0,0 +1,142 @@
+# Light VGG shinkansan
+import tensorflow as tf
+from keras.models import Sequential, Model
+from keras.optimizers import SGD, Adadelta, Adam
+from keras.layers import (Input, Dense, Dropout, Activation, Flatten, Conv2D,
+                          BatchNormalization, GlobalAveragePooling2D)
+from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
+from keras.layers.advanced_activations import PReLU
+# pretrained backbones for the sample() loader below
+from keras.applications import ResNet50, InceptionV3, VGG16, VGG19, imagenet_utils
+from keras.applications.xception import Xception, preprocess_input  # TensorFlow ONLY
+from keras.preprocessing.image import img_to_array, load_img
+from keras.preprocessing import image
+from keras.regularizers import l2
+from keras.losses import categorical_crossentropy
+from keras.utils import to_categorical
+from keras.callbacks import ModelCheckpoint
+import numpy as np
+import keras
+import math
+import argparse
+import cv2
+
+# Grow GPU memory on demand
+gpu_options = tf.GPUOptions(allow_growth=True)
+sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
+keras.backend.tensorflow_backend.set_session(sess)
+
+McDropout = False
+
+def fer_vgg(input_shape=(48, 48, 1), input_classes=7):
+    model1 = Sequential()
+    # block 1
+    model1.add(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=input_shape))
+    model1.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
+    model1.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
+    #model1.add(Dropout(0.25, training=McDropout))
+    # block 2
+    model1.add(Conv2D(128, (3, 3), padding='same'))
+    model1.add(PReLU(alpha_initializer='zeros'))  # Keras 2 spelling of init='zero'
+    model1.add(Conv2D(128, (3, 3), padding='same'))
+    model1.add(PReLU(alpha_initializer='zeros'))
+    model1.add(MaxPooling2D((2, 2), strides=(2, 2), name='pool2_2'))
+    #model1.add(Dropout(0.25, training=McDropout))
+    # block 3
+    model1.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
+    model1.add(Conv2D(256, (3, 3), padding='same'))
+    model1.add(Conv2D(256, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(0.001)))
+    model1.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2)))
+    #model1.add(Dropout(0.25, training=McDropout))
+    # block 4
+    model1.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
+    model1.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
+    model1.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
+    model1.add(Conv2D(512, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(0.001)))
+    model1.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2)))
+    #model1.add(Dropout(0.25, training=True))
+    # block 5
+    model1.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
+    model1.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
+    model1.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
+    model1.add(Conv2D(512, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(0.001)))
+    model1.add(AveragePooling2D(pool_size=(2, 2), strides=(2, 2)))
+    #model1.add(Dropout(0.25, training=McDropout))
+
+    # FC layer
+    model1.add(Flatten())
+    model1.add(Dense(4096))
+    model1.add(PReLU(alpha_initializer='zeros'))
+    #model1.add(Dropout(0.5, training=McDropout))
+    model1.add(Dense(1024))
+    model1.add(PReLU(alpha_initializer='zeros'))
+    #model1.add(Dropout(0.5, training=McDropout))
+    model1.add(Dense(512))
+    model1.add(PReLU(alpha_initializer='zeros'))
+    #model1.add(Dropout(0.3, training=McDropout))
+    model1.add(Dense(input_classes))
+    model1.add(Activation('softmax'))
+
+    return model1
+
+def hlswitch():
+    # Convolutional Neural Network (CNN): four downsampling conv blocks
+    # feeding a single sigmoid unit for the binary GPS/VISION decision
+    model = Sequential()
+
+    model.add(Conv2D(8, kernel_size=(3, 3), padding='same', input_shape=(200, 200, 1)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(3, 3)))
+
+    model.add(Conv2D(16, kernel_size=(3, 3), padding='same'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Conv2D(32, kernel_size=(3, 3), padding='same'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Conv2D(32, kernel_size=(3, 3), padding='same'))
+    model.add(BatchNormalization())
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size=(2, 2)))
+
+    model.add(Flatten())
+    model.add(Dense(1, activation='sigmoid'))
+    return model
+
+
+def sample(model, input_shape, classes):
+    models = {"vgg16": VGG16, "vgg19": VGG19, "inception": InceptionV3, "xception": Xception, "resnet": ResNet50}
+    if model in models:
+        network = models[model]
+        base = network(include_top=False, input_shape=input_shape)  # renamed to avoid shadowing the argument
+
+        x = base.output
+        x = GlobalAveragePooling2D()(x)
+        x = Dense(1024, activation='relu')(x)
+        prediction = Dense(classes, activation='softmax')(x)
+        Fmodel = Model(inputs=base.inputs, outputs=prediction)
+
+        Fmodel.compile(loss=categorical_crossentropy, optimizer=Adam(lr=1e-4), metrics=['accuracy'])
+        Fmodel.summary()
+        return Fmodel
+
+if __name__ == "__main__":
+    model1 = fer_vgg((48, 48, 1), 7)
+    model1.summary()
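+
+# --- Hedged usage sketch (illustrative addition): the binary switch net ---
+# Loss/optimizer mirror train.py; the zero batch is only a shape check.
+def demo_hlswitch():
+    switch = hlswitch()
+    switch.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
+    print(switch.predict(np.zeros((1, 200, 200, 1))).shape)  # -> (1, 1)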
diff --git a/Decision/HLSwitch/models/final_model_new_2.hdf5 b/Decision/HLSwitch/models/final_model_new_2.hdf5
new file mode 100644
index 0000000..6b1b59c
Binary files /dev/null and b/Decision/HLSwitch/models/final_model_new_2.hdf5 differ
diff --git a/Decision/HLSwitch/models/final_model_new_3.hdf5 b/Decision/HLSwitch/models/final_model_new_3.hdf5
new file mode 100644
index 0000000..0d916f4
Binary files /dev/null and b/Decision/HLSwitch/models/final_model_new_3.hdf5 differ
diff --git a/Decision/HLSwitch/models/final_model_new_4.hdf5 b/Decision/HLSwitch/models/final_model_new_4.hdf5
new file mode 100644
index 0000000..0303a87
Binary files /dev/null and b/Decision/HLSwitch/models/final_model_new_4.hdf5 differ
diff --git a/Decision/HLSwitch/readme.md b/Decision/HLSwitch/readme.md
new file mode 100644
index 0000000..63e4ed6
--- /dev/null
+++ b/Decision/HLSwitch/readme.md
@@ -0,0 +1,35 @@
+## ARTIV HLSwitch
+A deep-learning network that uses the camera's forward-view data to switch appropriately between Hoyoung's vision-following and Seunggi's GPS-following algorithms.
+Don't ask why it is called HL Switch; it stands for Ho (H) and Lee (L), and only while writing this markdown did I realize it mixes a given name with a surname.
+
+Still, HLS rolls off the tongue better than HSS.
+
+## dependencies
+1. tensorflow==1.14.0
+2. keras==2.3.0
+
+(Anything but TensorFlow 2 will do.)
+
+## How to run
+`python3 HLSwitch_eval.py`
+
+
+## How to Train
+`python3 train.py`
+
+## How to make dataset
+Put the footage into the video folder under dataset_raw and run `annotTool.py`.
+
+Annotated frames land in the GPS and VISION folders, named by video name and frame order. Move those two folders into TrainData in the parent directory (expected layout below),
+
+then run the training script above and it handles the rest. See the code for the detailed parameters.
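+
+The layout `train.py` expects (class names are inferred from the folder names
+by `flow_from_directory`, so they must stay GPS / VISION; the file names shown
+follow annotTool's `<video>_<frame>.jpg` pattern and are only examples):
+
+```
+TrainData/
+├── GPS/
+│   └── drive01.mp4_0.jpg
+└── VISION/
+    └── drive01.mp4_1.jpg
+```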
+
+#### Version Control
+
+The v04 version of train.py marks the initial release.
+
+|Version|Author|Changes|
+|---|---|---|
+|04|shinkansan|Initial release, covers the basic features|
+| | | |
diff --git a/Decision/HLSwitch/train.py b/Decision/HLSwitch/train.py
new file mode 100644
index 0000000..9075c9a
--- /dev/null
+++ b/Decision/HLSwitch/train.py
@@ -0,0 +1,127 @@
+## Train py for HL Switch
+# Binary Classification
+# 2020.09.12. Shin
+# v04
+
+import numpy as np
+import matplotlib.pyplot as plt
+from keras.datasets import mnist
+from keras.models import Sequential
+from keras.callbacks import ModelCheckpoint, EarlyStopping
+from keras.layers.core import Dense, Dropout, Activation, Flatten
+from keras.layers.convolutional import Convolution2D, MaxPooling2D
+from keras.preprocessing.image import ImageDataGenerator
+from keras.utils import np_utils
+from keras.utils import multi_gpu_model
+#from keras.regularizers import l2, activity_l2
+from keras.optimizers import SGD, RMSprop
+import h5py
+import keras
+import model.netLoader as netLoader
+import tensorflow as tf
+import os
+
+session_config = tf.ConfigProto()
+session_config.gpu_options.allow_growth = True
+session = tf.Session(config=session_config)
+
+use_data_gen = True
+batch_size = 32
+
+np.random.seed(77778)  # for reproducibility
+
+
+def visual(hist):
+    loss_ax = plt.subplot()
+    acc_ax = loss_ax.twinx()
+
+    loss_ax.plot(hist.history['loss'], 'y', label='train loss')
+    # validation curves exist only when validation data was supplied
+    if 'val_loss' in hist.history:
+        loss_ax.plot(hist.history['val_loss'], 'r', label='val loss')
+
+    # Keras 2.3 renamed the history key 'acc' to 'accuracy'
+    acc_key = 'accuracy' if 'accuracy' in hist.history else 'acc'
+    acc_ax.plot(hist.history[acc_key], 'b', label='train acc')
+    if 'val_' + acc_key in hist.history:
+        acc_ax.plot(hist.history['val_' + acc_key], 'g', label='val acc')
+
+    loss_ax.set_xlabel('epoch')
+    loss_ax.set_ylabel('loss')
+    acc_ax.set_ylabel('accuracy')
+
+    loss_ax.legend(loc='upper left')
+    acc_ax.legend(loc='lower left')
+
+    plt.show()
+
+
+# Load the scaled data, both pixels and labels
+X_train = np.load('./data/Scaled_6.npy')
+Y_tr_labels = np.load('./data/labeld_6.npy')
+
+if use_data_gen:
+    train_datagen = ImageDataGenerator(
+        rescale=1./255,
+        shear_range=0.2,
+        zoom_range=0.2,
+        horizontal_flip=True)
+
+    test_datagen = ImageDataGenerator(rescale=1./255)
+
+    train_generator = train_datagen.flow_from_directory(
+        './TrainData/',
+        color_mode='grayscale',
+        target_size=(200, 200),
+        batch_size=batch_size,
+        class_mode='binary')
+
+else:
+    # reshape the flat pixel rows into 320 x 180 images
+    shapex, shapey = 320, 180
+    X_train = X_train.reshape(X_train.shape[0], shapey, shapex, 1)
+    # keep the labels as plain 0/1: the network ends in a single sigmoid unit
+    # trained with binary_crossentropy, so one-hot encoding would break it
+    Y_tr_labels = Y_tr_labels.astype('int8')
+
+
+# callbacks: TensorBoard logging, early stopping and best-model checkpointing
+tb_hist = keras.callbacks.TensorBoard(log_dir='./graph', histogram_freq=0, write_graph=True, write_images=True)
+early_stopping = EarlyStopping(monitor='val_loss', patience=20)
+MODEL_SAVE_FOLDER_PATH = './models/'
+if not os.path.exists(MODEL_SAVE_FOLDER_PATH):
+    os.mkdir(MODEL_SAVE_FOLDER_PATH)
+
+model_path = MODEL_SAVE_FOLDER_PATH + '{epoch:02d}-{val_loss:.4f}.hdf5'
+
+cb_checkpoint = ModelCheckpoint(filepath=model_path, monitor='val_loss',
+                                verbose=1, save_best_only=True)
+
+################################### model1
+#model = netLoader.fer_vgg(input_shape=(180,320,1), input_classes=2)
+model = netLoader.hlswitch()
+ans_dict = {0: 'GPS', 1: 'VISION'}
+
+# train with RMSprop; the SGD with Nesterov momentum below is kept as an alternative
+#model = multi_gpu_model(model, gpus=3)
+sgd = SGD(lr=0.055, decay=1e-6, momentum=0.9, nesterov=True)
+optm = RMSprop(lr=0.004, rho=0.9, epsilon=1e-08, decay=0.0)
+# 'loss' is not a valid metric name; the loss is reported automatically
+model.compile(loss='binary_crossentropy', optimizer=optm, metrics=['accuracy'])
+
+if use_data_gen:
+    hist = model.fit_generator(
+        train_generator,
+        steps_per_epoch=2000 // batch_size,
+        epochs=100)
+else:
+    # epochs replaces the deprecated Keras 1 argument nb_epoch
+    hist = model.fit(X_train, Y_tr_labels, validation_split=0.1, batch_size=batch_size,
+                     epochs=10, callbacks=[tb_hist, early_stopping, cb_checkpoint])
+
+print("Training Complete!")
+# save the trained model
+model.save('./models/final_model_new_4.hdf5')
+del model
+
+visual(hist)
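+
+# --- Hedged post-training check (illustrative addition) ---
+# The model object was deleted above, so reload the exported file and push a
+# dummy frame through it; the zero input is only a load/shape check.
+from keras.models import load_model
+check = load_model('./models/final_model_new_4.hdf5')
+print(check.predict(np.zeros((1, 200, 200, 1))))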