
Commit

Add files via upload
shinkansan committed Sep 12, 2020
1 parent b9c25e4 commit 86b62ca
Showing 11 changed files with 640 additions and 0 deletions.
103 changes: 103 additions & 0 deletions Decision/HLSwitch/HLSwitch_eval.py
@@ -0,0 +1,103 @@
# HLSwitch evaluation script
import tensorflow as tf
import numpy as np
import time
import cv2
from keras.models import load_model
from keras.preprocessing.image import load_img, save_img, img_to_array
import multiprocessing


config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)

model_dir = './models/final_model_new_4.hdf5'

class imageProcess():
def preprocess(self, img):
#img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = img.astype(np.float32)
img /= 255.0
return img

def postprocess(self, img):
pass


class inference():
def __init__(self):
self.model = load_model(model_dir)
self.switchValue = 0.5
self.imageSize = (200,200)
self.improc = imageProcess()
self.showInferenceTime = True
self.ans_dict = {0:'GPS', 1:'VISION'}


def run(self, img):
start = time.time()
tempStr = ""
tempImage = self.improc.preprocess(img)
result = self.model.predict(tempImage.reshape(1, self.imageSize[0], self.imageSize[1], 1))
prediction_result_prob = result * 100
end = time.time()
if self.showInferenceTime:
print("Inference Time(s) :", end-start)
if result[0] > self.switchValue:
tempStr = "VISION"
else:
tempStr = "GPS"

return [tempStr, prediction_result_prob]



class subNode():
def __init__(self):
#rospy.Subscriber('/camera/image_color', Image, self.imageCb)
runTh = multiprocessing.Process(target=self.run)
runTh.start()


def run(self):
pass

def imageCb(self, msg):
self.trafficIdx = msg.data
print(self.trafficIdx)
pass

class pubNode():
def __init__(self):
#self.switchPub = rospy.Publisher(rootname+pubAccel, Int16, queue_size = 1)
pass
def run(self):
pass

class main():
def run(self):
hlSwitch = inference()
video_dir = 'test.webm'
cap = cv2.VideoCapture(video_dir)
        while True:
            ret, frame = cap.read()
            if not ret:
                break  # stop when the video ends
tempImage = cv2.resize(frame, (200, 200))
tempImage = cv2.cvtColor(tempImage, cv2.COLOR_BGR2GRAY)
location = (0, 40)
            font = cv2.FONT_HERSHEY_SIMPLEX  # normal sans-serif font
fontScale = 0.9
strn = str(hlSwitch.run(tempImage))
cv2.putText(frame, strn, location, font, fontScale, (0,100,255), 2)
            cv2.imshow('HLSwitch', frame)


cv2.waitKey(1)


if __name__ == "__main__":
run = main()
run.run()
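
For reference, a minimal sketch of driving the inference class above on a single still image instead of the video loop in main. It assumes the imports and classes defined in HLSwitch_eval.py are in scope; sample.jpg is a hypothetical path, and the 200x200 grayscale preprocessing mirrors what main.run() does.

# Hypothetical single-image usage of inference (sketch only; 'sample.jpg' is a placeholder).
hl_switch = inference()                      # loads ./models/final_model_new_4.hdf5
frame = cv2.imread('sample.jpg')
gray = cv2.cvtColor(cv2.resize(frame, (200, 200)), cv2.COLOR_BGR2GRAY)
label, prob = hl_switch.run(gray)            # returns ['GPS' or 'VISION', model output * 100]
print(label, prob)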
73 changes: 73 additions & 0 deletions Decision/HLSwitch/dataset_raw/annotTool.py
@@ -0,0 +1,73 @@
# Dataset Gen

import cv2
import os


videoName = str(input("Video Name : (with Extension *.mp4) "))
try:
print('./video/' + videoName)
cap = cv2.VideoCapture('./video/' + videoName)
except:
print("Error while reading video file")# Dataset Gen

idx = 0
while(1):
ret, frame = cap.read()
print("Arrow Key LEFT : GPS\nArrow Key Right VISION\nArrow Key Up Save all and Exit")
if ret:

cv2.imshow('SCREEN', frame)
ket = cv2.waitKey(0)
print(ket)
if ket == 81 :
# Left
dir = './GPS/'
print("GPS")

cv2.imwrite(dir + videoName + '_' + str(idx) + '.jpg', cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (640, 360)))
pass
elif ket == 83:
# Right
dir = './VISION/'
cv2.imwrite(dir + videoName + '_' + str(idx) + '.jpg', cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (640, 360)))
print("VISION")
pass
elif ket == 82:
print("SAVE")
break

else:
break
idx += 1

cap.release()
cv2.destroyAllWindows()


while(1):
ret, frame = cap.read()
print("Arrow Key LEFT : GPS\nArrow Key Right VISION\nArrow Key Up Save all and Exit")
if ret:
cv2.imshow('SCREEN', frame)
        ket = cv2.waitKeyEx(0)

if ket == 2424832 :
# Left
dir = './GPS/'
print("GPS")
pass
elif ket == 2555904:
# Right
            dir = './VISION/'
print("VISION")
pass
elif ket == 2490368:
print("SAVE")
break

else:
break

cap.release()
cv2.destroyAllWindows()
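
The two loops above differ only in the key codes they compare against: 81/83/82 are what cv2.waitKey returns for the arrow keys on typical Linux builds, while 2424832/2555904/2490368 are the extended codes cv2.waitKeyEx returns on Windows. A sketch of how the two checks could be merged into one cross-platform helper, with the key sets taken from the loops above:

LEFT_KEYS = {81, 2424832}    # label frame as GPS
RIGHT_KEYS = {83, 2555904}   # label frame as VISION
UP_KEYS = {82, 2490368}      # save all and exit

def key_to_dir(key):
    """Map an arrow-key code to an output directory; None means stop annotating."""
    if key in LEFT_KEYS:
        return './GPS/'
    if key in RIGHT_KEYS:
        return './VISION/'
    return None

# inside the annotation loop:  ket = cv2.waitKeyEx(0); out_dir = key_to_dir(ket)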
80 changes: 80 additions & 0 deletions Decision/HLSwitch/img2Pixel.py
@@ -0,0 +1,80 @@
# Image normalization script
# Grayscale, resize, normalize (/255.0), then stack into an npy file
# Put this in the parent directory of the folders the sorted images live in and run it

# script for preparing datasets, loading fer data and generating scaled images
import numpy as np
import cv2
from PIL import Image
import sys
import os

emotions = {0:'GPS', 1:'VISION'}
global outfile, data, data0, label, label0, desired_size

def _cls():
os.system("cls" if os.name == "nt" else "clear")

def run(data, label, desired_size):
data0 = data[:]
label0 = label[:]
for root, dirs, files in os.walk(os.getcwd(), topdown=False):

for name in files:
try:

filename = os.path.join(root, name)
print(filename)
cvim = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
grayCv = cv2.resize(cvim, desired_size)
print(grayCv.shape)
pdata = np.array(grayCv.reshape(-1)).astype('float32')
pdata = [np.divide(d, 255.0) for d in pdata]
try:
data = np.concatenate((data, [pdata]))
except Exception as ex:
print('first start', ex)
data = np.array([pdata])

                emotions_k = list(emotions.keys())
                for key in emotions_k:
if emotions[key] in filename:
label = np.concatenate((label, [key]))
_cls()
print(f'add img2pix {name}:{emotions[key]}')

except Exception as rex:
print('Image OpenError!', rex)
print(data.shape)
print(f"All done! \n{len(data)} files are coverted and added ")
name = "Scaled_" + str(len(data)) + ".npy"
name_lab = "labeld_" + str(len(data)) + ".npy"
np.save(name, data)
np.save(name_lab, label)



# read CLI arguments; existing npy files may be given for merging (see manual below)
try:
global outfile, data, data0, label, label0, desired_size
var1, var2, desired_size = '', '', (320, 180)
var1 = sys.argv[2] #pixel
var2 = sys.argv[3] #label
data = [[]]
    data0 = data[:]
    label = []
    label0 = label[:]
desired_size = eval(sys.argv[1])

run(data, label, desired_size)
except Exception as ex:
tuto = '''
image2pixel by shinkansan
=Manual=
    e.g.) python img2Pixel.py '(48, 48)' data.npy label.npy
    argv[1] = image resize tuple, e.g. '(width, height)'
    argv[2] = existing data npy file to merge into
    argv[3] = existing label npy file to merge into
'''
print(tuto)
print(ex)
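
A sketch of how the Scaled_*.npy / labeld_*.npy pair written above might be loaded back for training. The exact filenames depend on how many images were converted (the suffix below is hypothetical), and the 180x320 shape follows from the default desired_size of (320, 180) being passed to cv2.resize as (width, height).

import numpy as np

data = np.load('Scaled_1234.npy')        # (N, 180*320) flattened pixels, already scaled to [0, 1]
labels = np.load('labeld_1234.npy')      # (N,) with 0 = GPS, 1 = VISION

images = data.reshape(-1, 180, 320, 1)   # NHWC tensor ready for a Keras model
print(images.shape, labels.shape)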
Binary file not shown.
80 changes: 80 additions & 0 deletions Decision/HLSwitch/model/mcdropout_ferVGG.py
@@ -0,0 +1,80 @@
'''
VGG-style network with Monte Carlo dropout, built with the Keras functional API
'''

import keras
from keras.layers import Dense, Conv2D, Dropout, Input, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.losses import categorical_crossentropy



def predictionMC():
pass


def fer_vgg(input_shape=(48, 48, 1), input_classes=7):
inputs = keras.Input(shape=input_shape)
# Conv Block 1
x = Conv2D(64, (3,3), activation = 'relu', padding='same')(inputs)
x = Dropout(0.25)(x, training = True)
    x = Conv2D(64, (3,3), activation = 'relu', padding = 'same')(x)
x = MaxPooling2D(pool_size = (2,2) , strides = (2,2))(x)
x = Dropout(0.25)(x, training = True)

# Conv Block 2
x = Conv2D(128, (3,3), activation='relu', padding='same')(x)
x = Dropout(0.25)(x, training = True)
x = Conv2D(128, (3,3), activation='relu', padding='same', kernel_regularizer=l2(0.001))(x)
x = MaxPooling2D((2,2), strides=(2,2))(x)
x = Dropout(0.25)(x, training = True)

# Conv Block 3
x = Conv2D(256, (3, 3), padding='same', activation ='relu')(x)
x = Dropout(0.25)(x, training = True)
x = Conv2D(256, (3, 3), padding='same', activation = 'relu', kernel_regularizer=l2(0.001))(x)
x = Dropout(0.25)(x, training = True)
x = Conv2D(256, (3, 3), padding='same', activation = 'relu', kernel_regularizer=l2(0.001))(x)
x = AveragePooling2D(pool_size = (2, 2), strides=(2, 2))(x)
x = Dropout(0.25)(x, training=True)

# Conv Block 4
x = Conv2D(512, (3, 3), padding = 'same', activation = 'relu')(x)
x = Dropout(0.25)(x, training = True)
x = Conv2D(512, (3, 3), padding = 'same', activation = 'relu')(x)
x = Dropout(0.25)(x, training = True)
x = Conv2D(512, (3, 3), padding = 'same', activation = 'relu', kernel_regularizer = l2(0.001))(x)
x = Dropout(0.25)(x, training = True)
x = Conv2D(512, (3, 3), padding = 'same', activation = 'relu', kernel_regularizer = l2(0.001))(x)
x = AveragePooling2D(pool_size = (2,2), strides=(2,2))(x)
x = Dropout(0.25)(x, training=True)

# Conv Block 5
x = Conv2D(512, (3, 3), padding = 'same', activation = 'relu')(x)
x = Dropout(0.25)(x, training = True)
x = Conv2D(512, (3, 3), padding = 'same', activation = 'relu')(x)
x = Dropout(0.25)(x, training = True)
x = Conv2D(512, (3, 3), padding = 'same', activation = 'relu', kernel_regularizer = l2(0.001))(x)
x = Dropout(0.25)(x, training = True)
x = Conv2D(512, (3, 3), padding = 'same', activation = 'relu', kernel_regularizer = l2(0.001))(x)
x = AveragePooling2D(pool_size = (2,2), strides=(2,2))(x)
x = Dropout(0.25)(x, training=True)

# FC Layers
x = Flatten()(x)
#x = Dense(4096, activation='relu')(x)
#x = Dropout(0.25)(x, training = True)
x = Dense(512, activation='relu')(x)
x = Dropout(0.25)(x, training=True)
x = Dense(256, activation='relu')(x)
    x = Dropout(0.25)(x, training=True)
    output = Dense(input_classes, activation='softmax')(x)

model = keras.Model(inputs, output)
return model

if __name__ == "__main__":
model = fer_vgg()
model.summary()
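
predictionMC is left as a stub above. Because every Dropout layer in fer_vgg is called with training=True, dropout stays active at prediction time, so a Monte Carlo estimate can be formed by repeating the forward pass. A sketch under that assumption; the sample count T and the zero-filled example input are arbitrary.

import numpy as np

def predict_mc(model, x, T=20):
    """Monte Carlo dropout: average T stochastic forward passes and keep the spread as uncertainty."""
    preds = np.stack([model.predict(x) for _ in range(T)], axis=0)   # (T, batch, classes)
    return preds.mean(axis=0), preds.std(axis=0)

# e.g. mean_prob, uncertainty = predict_mc(fer_vgg(), np.zeros((1, 48, 48, 1), dtype='float32'))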
