質問内容
画像認識を行うCNNを自作したのですがこれを用いてカメラの映像をつかった画像認識を行いたいと考えています。
そのために調べたサイト(リンク内容)にあったdarknetのサンプルコードを参考にさせていただこうと思ったのですが、どこをどのように変更すれば自作のCNNを利用できるのかわかりませんでした。
もし知っている方がいらっしゃったら教えていただけないでしょうか?
それともこのサンプルソースコードを利用する方法ではできないのでしょうか?
自作のCNN
python
# Train a small CNN on the Sign Language MNIST CSV data and save the model.
import keras
import pickle
import pandas as pd
import numpy as np
import seaborn as sns
from keras import models
from keras import layers
from keras import optimizers
from pandas import Series, DataFrame
from keras.utils import to_categorical
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from sklearn.preprocessing import LabelBinarizer

# Load the CSV: one row per image (a 'label' column plus 784 pixel columns).
sign_train = pd.read_csv("sign_mnist_train.csv", sep=",")
lb = LabelBinarizer()

# Training data: drop the label column, reshape each row to a 28x28
# single-channel image, and scale pixel values to [0, 1].
train_data = sign_train.drop(['label'], axis=1)
train_data = train_data.values.reshape(-1, 28, 28, 1)
train_data = train_data.astype('float32') / 255
train_label = sign_train['label']
# One-hot encode the labels (the network below has a 24-way output).
train_label = lb.fit_transform(train_label)

# Model: three conv/pool blocks, then two dense layers with light dropout,
# ending in a 24-class softmax.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(24, activation='softmax'))

# Training configuration.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])

# Train.
history = model.fit(train_data, train_label, epochs=20, batch_size=200)

# Save. NOTE: pickling a Keras model is not supported/reliable (the compiled
# graph and weights do not round-trip through pickle); use the native
# save format instead, and reload with keras.models.load_model('model.h5').
model.save('model.h5')
サイトのサンプルソースコード
python
1from __future__ import division 2import time 3import torch 4import torch.nn as nn 5from torch.autograd import Variable 6import numpy as np 7import cv2 8from util import * 9from darknet import Darknet 10from preprocess import prep_image, inp_to_image 11import pandas as pd 12import random 13import argparse 14import pickle as pkl 15 16def prep_image(img, inp_dim): 17 """ 18 Prepare image for inputting to the neural network. 19 20 Returns a Variable 21 """ 22 23 orig_im = img 24 dim = orig_im.shape[1], orig_im.shape[0] 25 img = cv2.resize(orig_im, (inp_dim, inp_dim)) 26 img_ = img[:,:,::-1].transpose((2,0,1)).copy() 27 img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0) 28 return img_, orig_im, dim 29 30def write(x, img): 31 c1 = tuple(x[1:3].int()) 32 c2 = tuple(x[3:5].int()) 33 cls = int(x[-1]) 34 label = "{0}".format(classes[cls]) 35 color = random.choice(colors) 36 cv2.rectangle(img, c1, c2,color, 1) 37 t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0] 38 c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4 39 cv2.rectangle(img, c1, c2,color, -1) 40 cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1); 41 return img 42 43def arg_parse(): 44 """ 45 Parse arguements to the detect module 46 47 """ 48 49 50 parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo') 51 parser.add_argument("--confidence", dest = "confidence", help = "Object Confidence to filter predictions", default = 0.25) 52 parser.add_argument("--nms_thresh", dest = "nms_thresh", help = "NMS Threshhold", default = 0.4) 53 parser.add_argument("--reso", dest = 'reso', help = 54 "Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed", 55 default = "160", type = str) 56 return parser.parse_args() 57 58 59 60if __name__ == '__main__': 61 cfgfile = "cfg/yolov3.cfg" 62 weightsfile = "yolov3.weights" 63 num_classes = 80 64 65 args = arg_parse() 66 confidence = float(args.confidence) 67 nms_thesh = float(args.nms_thresh) 68 start = 0 69 CUDA = torch.cuda.is_available() 70 71 72 73 74 num_classes = 80 75 bbox_attrs = 5 + num_classes 76 77 model = Darknet(cfgfile) 78 model.load_weights(weightsfile) 79 80 model.net_info["height"] = args.reso 81 inp_dim = int(model.net_info["height"]) 82 83 assert inp_dim % 32 == 0 84 assert inp_dim > 32 85 86 if CUDA: 87 model.cuda() 88 89 model.eval() 90 91 videofile = 'video.avi' 92 93 cap = cv2.VideoCapture(0) 94 95 assert cap.isOpened(), 'Cannot capture source' 96 97 frames = 0 98 start = time.time() 99 while cap.isOpened(): 100 101 ret, frame = cap.read() 102 if ret: 103 104 img, orig_im, dim = prep_image(frame, inp_dim) 105 106# im_dim = torch.FloatTensor(dim).repeat(1,2) 107 108 109 if CUDA: 110 im_dim = im_dim.cuda() 111 img = img.cuda() 112 113 114 output = model(Variable(img), CUDA) 115 output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh) 116 117 if type(output) == int: 118 frames += 1 119 print("FPS of the video is {:5.2f}".format( frames / (time.time() - start))) 120 cv2.imshow("frame", orig_im) 121 key = cv2.waitKey(1) 122 if key & 0xFF == ord('q'): 123 break 124 continue 125 126 127 128 output[:,1:5] = torch.clamp(output[:,1:5], 0.0, float(inp_dim))/inp_dim 129 130# im_dim = im_dim.repeat(output.size(0), 1) 131 output[:,[1,3]] *= frame.shape[1] 132 output[:,[2,4]] *= frame.shape[0] 133 134 135 classes = load_classes('data/coco.names') 136 colors = pkl.load(open("pallete", "rb")) 137 138 list(map(lambda x: write(x, orig_im), output)) 139 140 141 cv2.imshow("frame", orig_im) 142 key = cv2.waitKey(1) 143 if key & 0xFF == ord('q'): 144 break 145 frames += 1 146 print("FPS of the video is 
{:5.2f}".format( frames / (time.time() - start))) 147 148 149 else: 150 break
回答1件
あなたの回答
tips
プレビュー