Commit 50f8415a authored by YONG-LIN SU

demo new version

parent 564d3055
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tkinter as tk\n",
"import cv2\n",
"from PIL import Image,ImageTk\n",
"import time\n",
"from datetime import datetime \n",
"from face_predict import *\n",
"from influxdb import InfluxDBClient"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Set up GUI\n",
"window = tk.Tk() #Makes main window\n",
"window.wm_title(\"人臉辨識打卡系統\")\n",
"window.config(background=\"#FFFFFF\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Graphics window\n",
"imageFrame = tk.Frame(window, width=600, height=500)\n",
"imageFrame.grid(row=1, column=0,rowspan=4, padx=10, pady=2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Capture video frames\n",
"lmain = tk.Label(imageFrame)\n",
"lmain.grid(row=0, column=0)\n",
"cap = cv2.VideoCapture(0)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# result\n",
"status_label=tk.Label(window,text='歡迎使用本系統',font=(\"標楷體\", 20))\n",
"status_label.grid(row=0,column=0,columnspan=3)\n",
"time_label=tk.Label(window,text='時間:',font=(\"標楷體\", 20))\n",
"time_label.grid(row=1,column=1)\n",
"t=tk.Label(window,text='2019-06-19',font=(\"Times New Roman\", 20))\n",
"t.grid(row=2,column=1)\n",
"name_label=tk.Label(window,text='姓名: ',font=(\"標楷體\", 20))\n",
"name_label.grid(row=3,column=1)\n",
"name=tk.Label(window,text='Allen',font=(\"Times New Roman\", 20))\n",
"name.grid(row=4,column=1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def clock(status):\n",
" if(status=='login_'):\n",
" msg='開始上班辨識打卡'\n",
" else:\n",
" msg='開始下班辨識打卡'\n",
" status_label['text']=msg\n",
" login_time=time.time()\n",
" ret,frame=cap.read()\n",
" if(ret):\n",
" frame = cv2.flip(frame, 1)\n",
" result=infer(le,clf,frame)\n",
" if(len(result)==2):\n",
" status_label['text']='辨識完成 請確認資料'\n",
" t['text']=time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(login_time))\n",
" name['text']=str(result[1])\n",
" login_button.configure(text=\"是\",command=lambda:submit(status,result[1],login_time))\n",
" logout_button.configure(text=\"否\",command=retry)\n",
" else:\n",
" status_label['text']='偵測不到請重新調整位置' \n",
"\n",
"def submit(measurments,name,time):\n",
" if(measurments=='login_'):\n",
" msg='上班加油!!'\n",
" influxdb_insert(measurments,name,time)\n",
" else:\n",
" msg='上班辛苦了 下班好好休息吧!!'\n",
" status_label['text']=msg\n",
" status_label.after(3000,func=empty_result)\n",
"def empty_result():\n",
" status_label['text']='歡迎使用本系統'\n",
" t['text']=''\n",
" name['text']=''\n",
" login_button.configure(text=\"上班打卡\",command=lambda:clock('login_'))\n",
" logout_button.configure(text=\"下班打卡\",command=lambda:clock('logout_'))\n",
"def retry():\n",
" error_window=tk.Toplevel(window)\n",
" error_window.geometry('300x200')\n",
" error_window.title('請輸入您的姓名 以利後續訓練')\n",
" tk.Label(error_window,text='姓名: ',font=(\"標楷體\", 20)).grid(row=0,column=0)\n",
" currect_name=tk.StringVar()\n",
" entry_currect_name = tk.Entry(error_window, textvariable=currect_name)\n",
" entry_currect_name.grid(row=0,column=1)\n",
" button_submit=tk.Button(error_window,text='提交',font=(\"標楷體\", 20))\n",
" button_submit.grid(row=1,column=0,columnspan=2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def influxdb_insert(measurments,name,time):\n",
" # 建立資料庫連線\n",
" client=InfluxDBClient(host='192.168.5.17',port=8086,username='root',password='root',database='RD')\n",
" json_body = [\n",
" {\n",
" \"measurement\": measurments,\n",
" \"time\": datetime.utcfromtimestamp(time),\n",
" \"fields\": {\n",
" \"name\": name[0],\n",
" 'snapshot':name[0]+'.jpg'\n",
" }\n",
" }\n",
" ]\n",
" client.write_points(json_body)\n",
" client.close()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# button\n",
"login_button= tk.Button(window,text=\"上班打卡\",command=lambda:clock('login_'),font=(\"標楷體\", 20))\n",
"login_button.grid(row=5,column=0)\n",
"logout_button= tk.Button(window,text=\"下班打卡\",command=lambda:clock('logout_'),font=(\"標楷體\", 20))\n",
"logout_button.grid(row=5,column=1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def show_frame():\n",
" _, frame = cap.read()\n",
" frame = cv2.flip(frame, 1)\n",
" \n",
" faces = face_cascade.detectMultiScale(frame,scaleFactor=1.1,minNeighbors=3)\n",
" if(len(faces)>0):\n",
" for f in faces:\n",
" x,y,w,h=f\n",
" margin=10\n",
" cv2.rectangle(frame,(x-margin//2,y-margin//2),(x+w+margin//2,y+h+margin//2),(0,0,255))\n",
" cv2.putText(frame,time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()),(0,30)\n",
" ,cv2.FONT_HERSHEY_TRIPLEX,1, (0, 255, 255), 1, cv2.LINE_AA) \n",
" cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n",
" img = Image.fromarray(cv2image)\n",
" imgtk = ImageTk.PhotoImage(image=img)\n",
" lmain.imgtk = imgtk\n",
" lmain.configure(image=imgtk)\n",
" lmain.after(10, show_frame)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"show_frame() #Display 2\n",
"window.mainloop() #Starts GUI"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Slider window (slider controls stage position)\n",
"# sliderFrame = tk.Frame(window, width=600, height=100)\n",
"# sliderFrame.grid(row = 600, column=0, padx=10, pady=2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"img=cv2.imread('../data/Test/test1.jpg')\n",
"cv2img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n",
"imgtk=ImageTk.PhotoImage(Image.fromarray(cv2img))\n",
"lmain.imgtk = imgtk\n",
"lmain.configure(image=imgtk)\n",
"root.mainloop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from influxdb import InfluxDBClient"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 建立資料庫連線\n",
"client=InfluxDBClient(host='192.168.5.17',port=8086,username='root',password='root',database='RD')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"json_body = [\n",
" {\n",
" \"measurement\": \"login_\",\n",
" \"time\": datetime.utcfromtimestamp(time.time()),\n",
" \"fields\": {\n",
" \"name\": 'test2',\n",
" 'snapshot':\"test.jpg\"\n",
" }\n",
" }\n",
"]\n",
"client.write_points(json_body)\n",
"client.close()"
]
},
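{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal read-back sketch, assuming the same InfluxDB host, credentials, and `RD` database as above: query the most recent point from the `login_` measurement that the previous cell wrote."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch (assumes the same connection settings as above): read back the latest login_ point to confirm the write\n",
"client = InfluxDBClient(host='192.168.5.17', port=8086, username='root', password='root', database='RD')\n",
"rs = client.query('SELECT * FROM \"login_\" ORDER BY time DESC LIMIT 1')\n",
"print(list(rs.get_points()))\n",
"client.close()"
]
},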
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from datetime import datetime"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"datetime.utcfromtimestamp(time.time())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time()))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"from sklearn.externals import joblib\n",
"from skimage.transform import resize\n",
"from sklearn.preprocessing import LabelEncoder\n",
"from keras.models import model_from_json\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 載入人臉偵測cascade分類器\n",
"face_cascade = cv2.CascadeClassifier(\"../model/cv2/haarcascade_frontalface_alt2.xml\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 載入Facenet預測模型\n",
"model=model_from_json(open(\"../model/keras/facenet_model.json\",\"r\").read())\n",
"model.load_weights(\"../model/keras/facenet_weights.h5\")\n",
"model.summary()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 載入SVM分類器\n",
"clf=joblib.load('../model/20190618224324/20190618224324.pkl')\n",
"# 載入LabelEncoder\n",
"le=LabelEncoder()\n",
"le.classes_ =np.load('../model/20190618224324/classes.npy')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 影像預處理\n",
"def prewhiten(x):\n",
" if x.ndim == 4:\n",
" axis = (1, 2, 3)\n",
" size = x[0].size\n",
" elif x.ndim == 3:\n",
" axis = (0, 1, 2)\n",
" size = x.size\n",
" else:\n",
" print(x.ndim)\n",
" raise ValueError('Dimension should be 3 or 4')\n",
"\n",
" mean = np.mean(x, axis=axis, keepdims=True)\n",
" std = np.std(x, axis=axis, keepdims=True)\n",
" std_adj = np.maximum(std, 1.0/np.sqrt(size))\n",
" y = (x - mean) / std_adj\n",
" return y\n",
"\n",
"def l2_normalize(x, axis=-1, epsilon=1e-10):\n",
" output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))\n",
" return output"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 人臉偵測處理回傳結果矩陣\n",
"image_size=160\n",
"def face_cropped(img,faces, margin): \n",
" aligned_images = []\n",
" for f in faces:\n",
" (x, y, w, h) = f\n",
" cropped = img[y-margin//2:y+h+margin//2,x-margin//2:x+w+margin//2, :]\n",
" aligned = resize(cropped, (image_size, image_size), mode='reflect')\n",
" aligned_images.append(aligned)\n",
" \n",
" return np.array(aligned_images)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 取得人臉Facenet預測之特徵值\n",
"def calc_embs(faces, margin=10, batch_size=1):\n",
" aligned_images = prewhiten(faces)\n",
" pd = []\n",
" for start in range(0, len(aligned_images), batch_size):\n",
" pd.append(model.predict_on_batch(aligned_images[start:start+batch_size]))\n",
" embs = l2_normalize(np.concatenate(pd))\n",
"\n",
" return embs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 人臉辨識推斷\n",
"def infer(le, clf, img):\n",
" faces = face_cascade.detectMultiScale(img,scaleFactor=1.1,minNeighbors=3)\n",
" if(len(faces)==0):\n",
" return '偵測不到人臉請重新調整'\n",
" \n",
" embs = calc_embs(face_cropped(img,faces,10))\n",
"# pred = le.inverse_transform(clf.predict(embs))\n",
" pred=get_labels(le,clf,embs)\n",
" return [faces,pred]\n",
"# Labels 解析\n",
"def get_labels(le,clf,embs):\n",
" socres=clf.predict_proba(embs)\n",
" print(socres)\n",
" results=[]\n",
" for s in socres:\n",
" if(s[s.argmax()]>0.5):\n",
" results.append(le.inverse_transform([s.argmax()])[0])\n",
" else:\n",
" results.append('Unknow')\n",
" return results"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"img=cv2.imread('../data/Test/Kevin04.jpg')\n",
"# img2=cv2.imread('../data/Test/test6.jpg')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"result=infer(le,clf,img)\n",
"# result2=infer(le,clf,img2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 顯示結果\n",
"if(len(result)==2): \n",
" fa=result[0]\n",
" na=result[1]\n",
" for f,n in zip(fa,na):\n",
" x,y,w,h=f\n",
" margin=10\n",
" cv2.rectangle(img,(x-margin//2,y-margin//2),(x+w+margin//2,y+h+margin//2),(0,0,255))\n",
" cv2.putText(img, n, (x-margin//2,y-margin//2), cv2.FONT_HERSHEY_PLAIN,1, (0, 255, 255), 1, cv2.LINE_AA)\n",
" cv2.imshow('frame',img)\n",
" cv2.waitKey(0)\n",
" cv2.destroyAllWindows()\n",
"else:\n",
" print(result)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"faces = face_cascade.detectMultiScale(img,scaleFactor=1.1,minNeighbors=3)\n",
"if(len(faces)!=0):\n",
" err,embs = calc_embs(face_cropped(faces,10))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"socre=clf.predict_proba(embs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"get_labels(le,clf,embs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fa=result1[0]\n",
"na=result1[1]\n",
"print(f,n)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for f,n in zip(fa,na):\n",
" print(str(f) + n)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"from sklearn.svm import SVC\n",
"from sklearn.preprocessing import LabelEncoder\n",
"from skimage.transform import resize\n",
"from sklearn.externals import joblib\n",
"import os\n",
"import numpy as np\n",
"from keras.models import model_from_json\n",
"import time"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# loading facenet model\n",
"model=model_from_json(open(\"../model/keras/facenet_model.json\",\"r\").read())\n",
"model.load_weights(\"../model/keras/facenet_weights.h5\")\n",
"# model.summary()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 人臉偵測casecade分類器\n",
"cascade_path = '../model/cv2/haarcascade_frontalface_alt2.xml'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 訓練圖片路徑\n",
"image_dir_basepath=\"../data/images/\"\n",
"image_size = 160\n",
"names=[]\n",
"for f in os.listdir(os.path.abspath(image_dir_basepath)):\n",
" if(os.path.isdir(os.path.join(os.path.abspath(image_dir_basepath),f))):\n",
" names.append(f)\n",
"print(names)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 影像預處理\n",
"def prewhiten(x):\n",
" if x.ndim == 4:\n",
" axis = (1, 2, 3)\n",
" size = x[0].size\n",
" elif x.ndim == 3:\n",
" axis = (0, 1, 2)\n",
" size = x.size\n",
" else:\n",
" print(x.ndim)\n",
" raise ValueError('Dimension should be 3 or 4')\n",
"\n",
" mean = np.mean(x, axis=axis, keepdims=True)\n",
" std = np.std(x, axis=axis, keepdims=True)\n",
" std_adj = np.maximum(std, 1.0/np.sqrt(size))\n",
" y = (x - mean) / std_adj\n",
" return y\n",
"\n",
"def l2_normalize(x, axis=-1, epsilon=1e-10):\n",
" output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))\n",
" return output"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 人臉偵測處理回傳結果矩陣\n",
"def load_and_align_images(filepaths, margin):\n",
" cascade = cv2.CascadeClassifier(cascade_path)\n",
" \n",
" aligned_images = []\n",
" for filepath in filepaths:\n",
" img = cv2.imread(filepath)\n",
"\n",
" faces = cascade.detectMultiScale(img,\n",
" scaleFactor=1.1,\n",
" minNeighbors=3)\n",
" if(len(faces)==0):\n",
" continue\n",
" (x, y, w, h) = faces[0]\n",
" cropped = img[y-margin//2:y+h+margin//2,\n",
" x-margin//2:x+w+margin//2, :]\n",
" aligned = resize(cropped, (image_size, image_size), mode='reflect')\n",
" aligned_images.append(aligned)\n",
" \n",
" return np.array(aligned_images)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 取得人臉Facenet預測之特徵值\n",
"def calc_embs(filepaths, margin=10, batch_size=1):\n",
" aligned_images = prewhiten(load_and_align_images(filepaths, margin))\n",
" pd = []\n",
" for start in range(0, len(aligned_images), batch_size):\n",
" pd.append(model.predict_on_batch(aligned_images[start:start+batch_size]))\n",
" embs = l2_normalize(np.concatenate(pd))\n",
"\n",
" return embs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 人臉辨識訓練主程序\n",
"def train(dir_basepath, names, max_num_img=10):\n",
" labels = []\n",
" embs = []\n",
" for name in names:\n",
" dirpath = os.path.abspath(dir_basepath + name)\n",
" filepaths = [os.path.join(dirpath, f) for f in os.listdir(dirpath)][:max_num_img]\n",
" embs_ = calc_embs(filepaths) \n",
" labels.extend([name] * len(embs_))\n",
" embs.append(embs_)\n",
" \n",
" embs = np.concatenate(embs)\n",
" le = LabelEncoder().fit(labels)\n",
" y = le.transform(labels)\n",
" clf = SVC(kernel='linear', probability=True).fit(embs, y)\n",
" return le, clf"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 人臉辨識推斷\n",
"def infer(le, clf, filepaths):\n",
" embs = calc_embs(filepaths)\n",
" pred = le.inverse_transform(clf.predict(embs))\n",
" return embs,pred"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"le, clf = train(image_dir_basepath, names)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 模型保存SVM與laberencoder\n",
"model_name=time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n",
"full_path=os.path.join(os.path.abspath(\"../model/\"),model_name)\n",
"if(not os.path.exists(full_path)):\n",
" os.mkdir(full_path)\n",
"else:\n",
" print('is exist')\n",
"joblib.dump(clf,os.path.join(full_path,model_name+'.pkl'))\n",
"np.save(os.path.join(full_path,'classes.npy'),le.classes_)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 寫入所有classes以便查閱\n",
"f=open(os.path.join(full_path,'labels.txt'),'w')\n",
"for label in le.classes_:\n",
" f.write(label+'\\n')\n",
"f.close()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
 # coding: utf-8
-# In[ ]:
+# In[1]:
 import tkinter as tk
@@ -11,9 +11,10 @@ import time
 from datetime import datetime
 from face_predict import *
 from influxdb import InfluxDBClient
+import os
-# In[ ]:
+# In[2]:
 #Set up GUI
@@ -22,7 +23,7 @@ window.wm_title("人臉辨識打卡系統")
 window.config(background="#FFFFFF")
-# In[ ]:
+# In[3]:
 #Graphics window
@@ -30,7 +31,7 @@ imageFrame = tk.Frame(window, width=600, height=500)
 imageFrame.grid(row=1, column=0,rowspan=4, padx=10, pady=2)
-# In[ ]:
+# In[4]:
 #Capture video frames
@@ -39,7 +40,7 @@ lmain.grid(row=0, column=0)
 cap = cv2.VideoCapture(0)
-# In[ ]:
+# In[5]:
 # result
@@ -55,7 +56,7 @@ name=tk.Label(window,text='Allen',font=("Times New Roman", 20))
 name.grid(row=4,column=1)
-# In[ ]:
+# In[6]:
 def clock(status):
@@ -69,30 +70,39 @@ def clock(status):
     if(ret):
         frame = cv2.flip(frame, 1)
         result=infer(le,clf,frame)
-        if(len(result)==2):
+        if(len(result)==3):
             status_label['text']='辨識完成 請確認資料'
             t['text']=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(login_time))
-            name['text']=str(result[1])
-            login_button.configure(text="是",command=lambda:submit(status,result[1],login_time))
-            logout_button.configure(text="否",command=retry)
+            name['text']=str(result[1]+result[2])
+            login_button.configure(text="是",command=lambda:submit(frame,status,result[1],login_time))
+            logout_button.configure(text="否",command=lambda:retry(frame,status,result[1],login_time))
         else:
             status_label['text']='偵測不到請重新調整位置'
 
-def submit(measurments,name,time):
+def submit(frame,measurments,name,t):
     if(measurments=='login_'):
         msg='上班加油!!'
-        influxdb_insert(measurments,name,time)
     else:
         msg='上班辛苦了 下班好好休息吧!!'
+    influxdb_insert(measurments,name[0],t)
+    cv2.imwrite(os.path.abspath(os.path.join('../save/',name[0],measurments,time.strftime("%Y-%m-%d-%H%M%S",time.localtime(t))+'.jpg')),frame)
     status_label['text']=msg
     status_label.after(3000,func=empty_result)
 def empty_result():
     status_label['text']='歡迎使用本系統'
     t['text']=''
     name['text']=''
     login_button.configure(text="上班打卡",command=lambda:clock('login_'))
     logout_button.configure(text="下班打卡",command=lambda:clock('logout_'))
-def retry():
+
+# In[7]:
+
+# 錯誤處理
+def retry(frame,measurments,name,t):
     error_window=tk.Toplevel(window)
     error_window.geometry('300x200')
     error_window.title('請輸入您的姓名 以利後續訓練')
@@ -100,23 +110,28 @@ def retry():
     currect_name=tk.StringVar()
     entry_currect_name = tk.Entry(error_window, textvariable=currect_name)
     entry_currect_name.grid(row=0,column=1)
-    button_submit=tk.Button(error_window,text='提交',font=("標楷體", 20))
+    button_submit=tk.Button(error_window,text='提交',font=("標楷體", 20),command=lambda:retry_ok(frame,error_window,measurments,entry_currect_name.get(),t))
     button_submit.grid(row=1,column=0,columnspan=2)
+def retry_ok(frame,error_window,measurments,name,t):
+    error_window.destroy()
+    influxdb_insert(measurments,name,t)
+    cv2.imwrite(os.path.abspath(os.path.join('../save/',name,'error_',time.strftime("%Y-%m-%d-%H%M%S",time.localtime(t))+'.jpg')),frame)
+    empty_result()
-# In[ ]:
+# In[8]:
-def influxdb_insert(measurments,name,time):
+def influxdb_insert(measurments,name,t):
     # 建立資料庫連線
     client=InfluxDBClient(host='192.168.5.17',port=8086,username='root',password='root',database='RD')
     json_body = [
         {
             "measurement": measurments,
-            "time": datetime.utcfromtimestamp(time),
+            "time": datetime.utcfromtimestamp(t),
             "fields": {
-                "name": name[0],
-                'snapshot':name[0]+'.jpg'
+                "name": name,
+                'snapshot':name+'.jpg'
             }
         }
     ]
@@ -124,7 +139,7 @@ def influxdb_insert(measurments,name,time):
     client.close()
-# In[ ]:
+# In[9]:
 # button
@@ -134,7 +149,7 @@ logout_button= tk.Button(window,text="下班打卡",command=lambda:clock('logout
 logout_button.grid(row=5,column=1)
-# In[ ]:
+# In[10]:
 def show_frame():
@@ -157,7 +172,7 @@ def show_frame():
     lmain.after(10, show_frame)
-# In[ ]:
+# In[11]:
 show_frame() #Display 2
@@ -228,5 +243,26 @@ datetime.utcfromtimestamp(time.time())
 # In[ ]:
-time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
+time.strftime("%Y-%m-%d-%H%M%S", time.localtime(time.time()))
+
+# In[ ]:
+
+with open('../model/20190624135811//labels.txt') as f:
+    lines = f.readlines()
+
+# In[ ]:
+
+for i in range(len(lines)):
+    name=lines[i].split('\n')[0]
+    save_path=os.path.abspath('../save/')
+    name_path=os.path.join(save_path,name)
+    if(not os.path.isdir(name_path)):
+        os.mkdir(name_path)
+        os.mkdir(os.path.join(name_path,'login_'))
+        os.mkdir(os.path.join(name_path,'logout_'))
+        os.mkdir(os.path.join(name_path,'error_'))