-
Notifications
You must be signed in to change notification settings - Fork 29
/
garbage_detection.py
172 lines (121 loc) · 6.08 KB
/
garbage_detection.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
import argparse
import logging
import math
import os
import os.path as osp
import pickle as pkl
import sys
from datetime import datetime

import torch
from torch.autograd import Variable

from model.darknet import Darknet
from model.util import (
    create_batches,
    create_output_json,
    cv_image2tensor,
    load_data_frame,
    load_images,
    process_result,
    resize_image,
    transform_result,
)
class GarbageImageClassifier:
    """
    Garbage detection classifier backed by a YOLOv3 Darknet model.

    Loads the network config and weights relative to this script's own
    location, then exposes two entry points that turn images into lists of
    per-batch attribute dicts describing detected objects:

    - ``detect_image(path)`` for image files on disk
    - ``detect_image_data_frame(data_frame)`` for a single in-memory frame
    """

    def __init__(self, cuda, obj_thresh=0.5, nms_thresh=0.4):
        """
        Load the Darknet model and prepare it for inference.

        :param cuda: run inference on the GPU when True (requires CUDA).
        :param obj_thresh: objectness confidence threshold for detections.
        :param nms_thresh: IoU threshold used by non-maximum suppression.
        """
        # Resolve model files relative to this script so the classifier
        # works regardless of the importer's working directory.
        cur_script_path = os.path.dirname(os.path.abspath(__file__))
        self.cuda = cuda
        self.obj_thresh = obj_thresh
        self.nms_thresh = nms_thresh
        if cuda and not torch.cuda.is_available():
            # BUG FIX: `sys` was referenced here without being imported,
            # which turned this error path into a NameError.
            print("ERROR: cuda is not available, try running on CPU")
            sys.exit(1)
        print('Loading network...')
        self.model = Darknet(cur_script_path + "/cfg/yolov3_garb_test.cfg")
        self.model.load_weights(cur_script_path + '/weights/garb.weights')
        if self.cuda:
            self.model.cuda()
        # Inference mode: freezes dropout / batch-norm statistics.
        self.model.eval()
        print('Network loaded')
        self.createLogger()
        self.logger.info("GarbageImageClassifier: Init")

    # ----
    def createLogger(self):
        """Configure a module-level logger with a stream handler (idempotent)."""
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(level=logging.INFO)  # SETTING: log level
        # BUG FIX: the original attached a fresh StreamHandler on every
        # instantiation; since getLogger returns a shared singleton, each new
        # classifier duplicated every subsequent log line. Only attach once.
        if not self.logger.handlers:
            handler = logging.StreamHandler()
            formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)-4s %(message)s')
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)

    # ----
    def _detect_batches(self, imgs, colors, classes):
        """
        Shared inference loop for both public entry points.

        :param imgs: list of loaded images (as produced by load_images /
            load_data_frame — presumably OpenCV arrays; TODO confirm).
        :param colors: per-class colors forwarded to create_output_json.
        :param classes: class names; order must match the model's class ids.
        :return: list with one attribute dict per non-empty batch, holding
            frame metadata, detected boxes, per-class counts and timings.
        """
        # Network input geometry and batching come from the parsed cfg file.
        input_size = [int(self.model.net_info['height']), int(self.model.net_info['width'])]
        batch_size = int(self.model.net_info['batch'])
        img_batches = create_batches(imgs, batch_size)
        print('Detecting...')
        all_images_attributes = []
        for img_batch in img_batches:
            start_time = datetime.now()
            img_tensors = torch.stack([cv_image2tensor(img, input_size) for img in img_batch])
            # NOTE(review): Variable is a no-op wrapper on modern PyTorch;
            # kept for behavioral parity with the original code.
            img_tensors = Variable(img_tensors)
            if self.cuda:
                img_tensors = img_tensors.cuda()
            detections = self.model(img_tensors, self.cuda).cpu()
            detections = process_result(detections, self.obj_thresh, self.nms_thresh)
            if len(detections) == 0:
                # Nothing above threshold in this batch: no entry is emitted.
                continue
            detections = transform_result(detections, img_batch, input_size)
            boxes = [create_output_json(img_batch, detection, colors, classes)
                     for detection in detections]
            images_attributes = {
                'frameMeta': {'width': input_size[1], 'height': input_size[0]},
                'detectedObjects': boxes,
                'counts': {x: 0 for x in classes},
            }
            images_attributes['counts']['total'] = 0
            for box in boxes:
                images_attributes['counts'][box['detectedObjectType']] += 1
                images_attributes['counts']['total'] += 1
            end_time = datetime.now()
            print('Detection finished in %s' % (end_time - start_time))
            images_attributes['mlDoneAt'] = str(end_time)
            # NOTE(review): this is a timedelta object, not a string — it is
            # not JSON-serializable as-is; kept unchanged for compatibility.
            images_attributes['mlTimeTaken'] = end_time - start_time
            all_images_attributes.append(images_attributes)
        return all_images_attributes

    # ----
    def detect_image(self, path, colors=[(39, 129, 113), (164, 80, 133), (83, 122, 114)], classes=['container_small', 'garbage_bag', 'cardboard']):
        """
        Run detection on image file(s) at *path*.

        :param path: file or directory path accepted by load_images.
        :param colors: per-class draw colors (read-only default, never mutated).
        :param classes: class names matching the trained model.
        :return: list of per-batch attribute dicts (see _detect_batches).
        """
        print('Loading input image(s)...')
        imlist, imgs = load_images(path)
        print('Input image(s) loaded')
        return self._detect_batches(imgs, colors, classes)

    # ----
    def detect_image_data_frame(self, data_frame, colors=[(39, 129, 113), (164, 80, 133), (83, 122, 114)], classes=['container_small', 'garbage_bag', 'cardboard']):
        """
        Run detection on a single in-memory frame.

        :param data_frame: raw frame accepted by load_data_frame.
        :param colors: per-class draw colors (read-only default, never mutated).
        :param classes: class names matching the trained model.
        :return: list of per-batch attribute dicts (see _detect_batches).
        """
        print('Loading input image(s)...')
        imgs = [load_data_frame(data_frame)]
        print('Input image(s) loaded')
        return self._detect_batches(imgs, colors, classes)