Commit 564d3055 authored by YONG-LIN SU's avatar YONG-LIN SU

add: save error-detection samples to the local folder for retraining

parent c13b3fd0
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Reference\n",
"- https://medium.com/@neotheicebird/webcam-based-image-processing-in-ipython-notebooks-47c75a022514"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:43:38.815479Z",
"start_time": "2017-12-27T10:43:38.352970Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"%matplotlib inline\n",
"\n",
"import numpy as np\n",
"import cv2\n",
"import matplotlib.pyplot as plt\n",
"import signal\n",
"from IPython import display\n",
"\n",
"from sklearn.svm import SVC\n",
"from sklearn.preprocessing import LabelEncoder\n",
"from skimage.transform import resize\n",
"from keras.models import load_model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:43:38.829958Z",
"start_time": "2017-12-27T10:43:38.828130Z"
}
},
"outputs": [],
"source": [
"cascade_path = '../model/cv2/haarcascade_frontalface_alt2.xml'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:43:44.388891Z",
"start_time": "2017-12-27T10:43:39.027900Z"
}
},
"outputs": [],
"source": [
"model_path = '../model/keras/facenet_keras.h5'\n",
"model = load_model(model_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:43:44.419587Z",
"start_time": "2017-12-27T10:43:44.404326Z"
}
},
"outputs": [],
"source": [
"def prewhiten(x):\n",
" if x.ndim == 4:\n",
" axis = (1, 2, 3)\n",
" size = x[0].size\n",
" elif x.ndim == 3:\n",
" axis = (0, 1, 2)\n",
" size = x.size\n",
" else:\n",
" raise ValueError('Dimension should be 3 or 4')\n",
"\n",
" mean = np.mean(x, axis=axis, keepdims=True)\n",
" std = np.std(x, axis=axis, keepdims=True)\n",
" std_adj = np.maximum(std, 1.0/np.sqrt(size))\n",
" y = (x - mean) / std_adj\n",
" return y\n",
"\n",
"def l2_normalize(x, axis=-1, epsilon=1e-10):\n",
" output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))\n",
" return output"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:43:44.447934Z",
"start_time": "2017-12-27T10:43:44.439987Z"
}
},
"outputs": [],
"source": [
"def calc_embs(imgs, margin, batch_size):\n",
" aligned_images = prewhiten(imgs)\n",
" pd = []\n",
" for start in range(0, len(aligned_images), batch_size):\n",
" pd.append(model.predict_on_batch(aligned_images[start:start+batch_size]))\n",
" embs = l2_normalize(np.concatenate(pd))\n",
"\n",
" return embs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:43:44.843761Z",
"start_time": "2017-12-27T10:43:44.469101Z"
}
},
"outputs": [],
"source": [
"class FaceDemo(object):\n",
" def __init__(self, cascade_path):\n",
" self.vc = None\n",
" self.cascade = cv2.CascadeClassifier(cascade_path)\n",
" self.margin = 10\n",
" self.batch_size = 1\n",
" self.n_img_per_person = 10\n",
" self.is_interrupted = False\n",
" self.data = {}\n",
" self.le = None\n",
" self.clf = None\n",
" \n",
" def _signal_handler(self, signal, frame):\n",
" self.is_interrupted = True\n",
" \n",
" def capture_images(self, name='Unknown'):\n",
" vc = cv2.VideoCapture(0)\n",
" self.vc = vc\n",
" if vc.isOpened():\n",
" is_capturing, _ = vc.read()\n",
" else:\n",
" is_capturing = False\n",
"\n",
" imgs = []\n",
" signal.signal(signal.SIGINT, self._signal_handler)\n",
" self.is_interrupted = False\n",
" while is_capturing:\n",
" is_capturing, frame = vc.read()\n",
" frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
" faces = self.cascade.detectMultiScale(frame,\n",
" scaleFactor=1.1,\n",
" minNeighbors=3,\n",
" minSize=(100, 100))\n",
" if len(faces) != 0:\n",
" face = faces[0]\n",
" (x, y, w, h) = face\n",
" left = x - self.margin // 2\n",
" right = x + w + self.margin // 2\n",
" bottom = y - self.margin // 2\n",
" top = y + h + self.margin // 2\n",
" img = resize(frame[bottom:top, left:right, :],\n",
" (160, 160), mode='reflect')\n",
" imgs.append(img)\n",
" cv2.rectangle(frame,\n",
" (left-1, bottom-1),\n",
" (right+1, top+1),\n",
" (255, 0, 0), thickness=2)\n",
"\n",
" plt.imshow(frame)\n",
" plt.title('{}/{}'.format(len(imgs), self.n_img_per_person))\n",
" plt.xticks([])\n",
" plt.yticks([])\n",
" display.clear_output(wait=True)\n",
" if len(imgs) == self.n_img_per_person:\n",
" vc.release()\n",
" self.data[name] = np.array(imgs)\n",
" break\n",
" try:\n",
" plt.pause(0.1)\n",
" except Exception:\n",
" pass\n",
" if self.is_interrupted:\n",
" vc.release()\n",
" break\n",
" \n",
" def train(self):\n",
" labels = []\n",
" embs = []\n",
" names = self.data.keys()\n",
" for name, imgs in self.data.items():\n",
" embs_ = calc_embs(imgs, self.margin, self.batch_size) \n",
" labels.extend([name] * len(embs_))\n",
" embs.append(embs_)\n",
"\n",
" embs = np.concatenate(embs)\n",
" le = LabelEncoder().fit(labels)\n",
" y = le.transform(labels)\n",
" clf = SVC(kernel='linear', probability=True).fit(embs, y)\n",
" \n",
" self.le = le\n",
" self.clf = clf\n",
" \n",
" def infer(self):\n",
" vc = cv2.VideoCapture(0)\n",
" self.vc = vc\n",
" if vc.isOpened():\n",
" is_capturing, _ = vc.read()\n",
" else:\n",
" is_capturing = False\n",
"\n",
" signal.signal(signal.SIGINT, self._signal_handler)\n",
" self.is_interrupted = False\n",
" while is_capturing:\n",
" is_capturing, frame = vc.read()\n",
" frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
" faces = self.cascade.detectMultiScale(frame,\n",
" scaleFactor=1.1,\n",
" minNeighbors=3,\n",
" minSize=(100, 100))\n",
" pred = None\n",
" if len(faces) != 0:\n",
" face = faces[0]\n",
" (x, y, w, h) = face\n",
" left = x - self.margin // 2\n",
" right = x + w + self.margin // 2\n",
" bottom = y - self.margin // 2\n",
" top = y + h + self.margin // 2\n",
" img = resize(frame[bottom:top, left:right, :],\n",
" (160, 160), mode='reflect')\n",
" embs = calc_embs(img[np.newaxis], self.margin, 1)\n",
" pred = self.le.inverse_transform(self.clf.predict(embs))\n",
" cv2.rectangle(frame,\n",
" (left-1, bottom-1),\n",
" (right+1, top+1),\n",
" (255, 0, 0), thickness=2)\n",
" plt.imshow(frame)\n",
" plt.title(pred)\n",
" plt.xticks([])\n",
" plt.yticks([])\n",
" display.clear_output(wait=True)\n",
" try:\n",
" plt.pause(0.1)\n",
" except Exception:\n",
" pass\n",
" if self.is_interrupted:\n",
" vc.release()\n",
" break"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:43:44.873391Z",
"start_time": "2017-12-27T10:43:44.861742Z"
}
},
"outputs": [],
"source": [
"f = FaceDemo(cascade_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:44:10.714335Z",
"start_time": "2017-12-27T10:44:06.972864Z"
}
},
"outputs": [],
"source": [
"# Train with two or more people\n",
"f.capture_images('nyoki-mtl')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:44:17.284697Z",
"start_time": "2017-12-27T10:44:15.933323Z"
}
},
"outputs": [],
"source": [
"f.train()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:44:36.348569Z",
"start_time": "2017-12-27T10:44:17.930893Z"
}
},
"outputs": [],
"source": [
"f.infer()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.6"
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Reference\n",
"\n",
"- http://machinethink.net/blog/coreml-custom-layers/"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:47:01.059894Z",
"start_time": "2017-12-27T10:46:55.443001Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import sys\n",
"sys.path.append('../code/')\n",
"\n",
"from inception_resnet_v1 import *\n",
"model = InceptionResNetV1(weights_path='../model/keras/weights/facenet_keras_weights.h5')"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:47:02.740150Z",
"start_time": "2017-12-27T10:47:02.677541Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:root:Keras version 2.1.2 detected. Last version known to be fully compatible of Keras is 2.0.6 .\n",
"WARNING:root:TensorFlow version 1.4.1 detected. Last version known to be fully compatible is 1.2.1 .\n"
]
}
],
"source": [
"from coremltools.proto import NeuralNetwork_pb2\n",
"\n",
"# The conversion function for Lambda layers.\n",
"def convert_lambda(layer):\n",
" if layer.function == scaling:\n",
" params = NeuralNetwork_pb2.CustomLayerParams()\n",
" params.className = \"scaling\"\n",
" return params\n",
" else:\n",
" return None"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"ExecuteTime": {
"end_time": "2017-12-27T10:47:20.149309Z",
"start_time": "2017-12-27T10:47:07.053856Z"
},
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0 : input_1, <keras.engine.topology.InputLayer object at 0x7fd8e4406410>\n",
"1 : Conv2d_1a_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ad70b750>\n",
"2 : Conv2d_1a_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad70b850>\n",
"3 : Conv2d_1a_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ad70b910>\n",
"4 : Conv2d_2a_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ad70bb10>\n",
"5 : Conv2d_2a_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad688d90>\n",
"6 : Conv2d_2a_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ad63da50>\n",
"7 : Conv2d_2b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ad63d450>\n",
"8 : Conv2d_2b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad619850>\n",
"9 : Conv2d_2b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ad5ce510>\n",
"10 : MaxPool_3a_3x3, <keras.layers.pooling.MaxPooling2D object at 0x7fd8ad5cecd0>\n",
"11 : Conv2d_3b_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ad5dead0>\n",
"12 : Conv2d_3b_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad5a7950>\n",
"13 : Conv2d_3b_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ad555a50>\n",
"14 : Conv2d_4a_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ad555610>\n",
"15 : Conv2d_4a_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad51afd0>\n",
"16 : Conv2d_4a_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ad4e2510>\n",
"17 : Conv2d_4b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ad4cbd10>\n",
"18 : Conv2d_4b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad4a7b10>\n",
"19 : Conv2d_4b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ad4557d0>\n",
"20 : Block35_1_Branch_2_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ad287990>\n",
"21 : Block35_1_Branch_2_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad2f2490>\n",
"22 : Block35_1_Branch_2_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ad239690>\n",
"23 : Block35_1_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ad379750>\n",
"24 : Block35_1_Branch_2_Conv2d_0b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ad2192d0>\n",
"25 : Block35_1_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad3e5650>\n",
"26 : Block35_1_Branch_2_Conv2d_0b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad253150>\n",
"27 : Block35_1_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ad3b0850>\n",
"28 : Block35_1_Branch_2_Conv2d_0b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ad1cda90>\n",
"29 : Block35_1_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ad46cd90>\n",
"30 : Block35_1_Branch_1_Conv2d_0b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ad34a0d0>\n",
"31 : Block35_1_Branch_2_Conv2d_0c_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ad195a50>\n",
"32 : Block35_1_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad46ce90>\n",
"33 : Block35_1_Branch_1_Conv2d_0b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad30a090>\n",
"34 : Block35_1_Branch_2_Conv2d_0c_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad17c6d0>\n",
"35 : Block35_1_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ad434c90>\n",
"36 : Block35_1_Branch_1_Conv2d_0b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ad325d50>\n",
"37 : Block35_1_Branch_2_Conv2d_0c_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ad1458d0>\n",
"38 : Block35_1_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8ad125510>\n",
"39 : Block35_1_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ad0d7210>\n",
"40 : lambda_1, <keras.layers.core.Lambda object at 0x7fd8ad0d7190>\n",
"41 : add_1, <keras.layers.merge.Add object at 0x7fd8ad0878d0>\n",
"42 : Block35_1_Activation, <keras.layers.core.Activation object at 0x7fd8ad0a00d0>\n",
"43 : Block35_2_Branch_2_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8acf72590>\n",
"44 : Block35_2_Branch_2_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8acf5e490>\n",
"45 : Block35_2_Branch_2_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aced7d50>\n",
"46 : Block35_2_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ad066350>\n",
"47 : Block35_2_Branch_2_Conv2d_0b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8acef3b90>\n",
"48 : Block35_2_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad036790>\n",
"49 : Block35_2_Branch_2_Conv2d_0b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8acf26d10>\n",
"50 : Block35_2_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ad018b10>\n",
"51 : Block35_2_Branch_2_Conv2d_0b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ace3a690>\n",
"52 : Block35_2_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ad0a0a90>\n",
"53 : Block35_2_Branch_1_Conv2d_0b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8acfe6ad0>\n",
"54 : Block35_2_Branch_2_Conv2d_0c_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8acdfeb50>\n",
"55 : Block35_2_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ad0a0bd0>\n",
"56 : Block35_2_Branch_1_Conv2d_0b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8acfcb750>\n",
"57 : Block35_2_Branch_2_Conv2d_0c_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ace6b350>\n",
"58 : Block35_2_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ad036410>\n",
"59 : Block35_2_Branch_1_Conv2d_0b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8acf92950>\n",
"60 : Block35_2_Branch_2_Conv2d_0c_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ace33f90>\n",
"61 : Block35_2_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8acdcf110>\n",
"62 : Block35_2_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8acdabf90>\n",
"63 : lambda_2, <keras.layers.core.Lambda object at 0x7fd8acd43a90>\n",
"64 : add_2, <keras.layers.merge.Add object at 0x7fd8acd5f210>\n",
"65 : Block35_2_Activation, <keras.layers.core.Activation object at 0x7fd8acd706d0>\n",
"66 : Block35_3_Branch_2_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8acbd5850>\n",
"67 : Block35_3_Branch_2_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8acbc1450>\n",
"68 : Block35_3_Branch_2_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8acb8a2d0>\n",
"69 : Block35_3_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8acce2ed0>\n",
"70 : Block35_3_Branch_2_Conv2d_0b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8acba2190>\n",
"71 : Block35_3_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8acd1f590>\n",
"72 : Block35_3_Branch_2_Conv2d_0b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8acb69250>\n",
"73 : Block35_3_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8accca310>\n",
"74 : Block35_3_Branch_2_Conv2d_0b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8acb1b750>\n",
"75 : Block35_3_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8acd0d890>\n",
"76 : Block35_3_Branch_1_Conv2d_0b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8acc3fcd0>\n",
"77 : Block35_3_Branch_2_Conv2d_0c_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8acae0c10>\n",
"78 : Block35_3_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8acd0de90>\n",
"79 : Block35_3_Branch_1_Conv2d_0b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8accac650>\n",
"80 : Block35_3_Branch_2_Conv2d_0c_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8acacc410>\n",
"81 : Block35_3_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8acd1f210>\n",
"82 : Block35_3_Branch_1_Conv2d_0b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8acc29d10>\n",
"83 : Block35_3_Branch_2_Conv2d_0c_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8aca44ad0>\n",
"84 : Block35_3_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8acaaf190>\n",
"85 : Block35_3_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aca70790>\n",
"86 : lambda_3, <keras.layers.core.Lambda object at 0x7fd8aca24b50>\n",
"87 : add_3, <keras.layers.merge.Add object at 0x7fd8ac9bc2d0>\n",
"88 : Block35_3_Activation, <keras.layers.core.Activation object at 0x7fd8ac9d1790>\n",
"89 : Block35_4_Branch_2_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ac83d910>\n",
"90 : Block35_4_Branch_2_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac8a5510>\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"91 : Block35_4_Branch_2_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ac86b390>\n",
"92 : Block35_4_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ac9ae550>\n",
"93 : Block35_4_Branch_2_Conv2d_0b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ac805050>\n",
"94 : Block35_4_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac987650>\n",
"95 : Block35_4_Branch_2_Conv2d_0b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac7c9310>\n",
"96 : Block35_4_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ac9623d0>\n",
"97 : Block35_4_Branch_2_Conv2d_0b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ac77a810>\n",
"98 : Block35_4_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ac9ef6d0>\n",
"99 : Block35_4_Branch_1_Conv2d_0b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ac928f10>\n",
"100 : Block35_4_Branch_2_Conv2d_0c_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ac742cd0>\n",
"101 : Block35_4_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac9eff50>\n",
"102 : Block35_4_Branch_1_Conv2d_0b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac912710>\n",
"103 : Block35_4_Branch_2_Conv2d_0c_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac7ad4d0>\n",
"104 : Block35_4_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ac9872d0>\n",
"105 : Block35_4_Branch_1_Conv2d_0b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ac88add0>\n",
"106 : Block35_4_Branch_2_Conv2d_0c_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ac728b90>\n",
"107 : Block35_4_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8ac70d250>\n",
"108 : Block35_4_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ac6d9850>\n",
"109 : lambda_4, <keras.layers.core.Lambda object at 0x7fd8ac68ac10>\n",
"110 : add_4, <keras.layers.merge.Add object at 0x7fd8ac63a850>\n",
"111 : Block35_4_Activation, <keras.layers.core.Activation object at 0x7fd8ac652050>\n",
"112 : Block35_5_Branch_2_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ac51d9d0>\n",
"113 : Block35_5_Branch_2_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac5075d0>\n",
"114 : Block35_5_Branch_2_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ac4d0450>\n",
"115 : Block35_5_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ac666710>\n",
"116 : Block35_5_Branch_2_Conv2d_0b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ac4eb110>\n",
"117 : Block35_5_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac666d90>\n",
"118 : Block35_5_Branch_2_Conv2d_0b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac4aa3d0>\n",
"119 : Block35_5_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ac5c5490>\n",
"120 : Block35_5_Branch_2_Conv2d_0b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ac45f8d0>\n",
"121 : Block35_5_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ac652a10>\n",
"122 : Block35_5_Branch_1_Conv2d_0b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ac590fd0>\n",
"123 : Block35_5_Branch_2_Conv2d_0c_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ac429d90>\n",
"124 : Block35_5_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac652b50>\n",
"125 : Block35_5_Branch_1_Conv2d_0b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac5f37d0>\n",
"126 : Block35_5_Branch_2_Conv2d_0c_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac40f590>\n",
"127 : Block35_5_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ac666390>\n",
"128 : Block35_5_Branch_1_Conv2d_0b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ac56ce90>\n",
"129 : Block35_5_Branch_2_Conv2d_0c_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ac38bc50>\n",
"130 : Block35_5_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8ac3f5350>\n",
"131 : Block35_5_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ac36acd0>\n",
"132 : lambda_5, <keras.layers.core.Lambda object at 0x7fd8ac36a190>\n",
"133 : add_5, <keras.layers.merge.Add object at 0x7fd8ac31c910>\n",
"134 : Block35_5_Activation, <keras.layers.core.Activation object at 0x7fd8ac335110>\n",
"135 : Mixed_6a_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ac2cd4d0>\n",
"136 : Mixed_6a_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac2cde10>\n",
"137 : Mixed_6a_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ac256ed0>\n",
"138 : Mixed_6a_Branch_1_Conv2d_0b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ac26ded0>\n",
"139 : Mixed_6a_Branch_1_Conv2d_0b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac2a5c10>\n",
"140 : Mixed_6a_Branch_1_Conv2d_0b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ac1cff10>\n",
"141 : Mixed_6a_Branch_0_Conv2d_1a_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ad455190>\n",
"142 : Mixed_6a_Branch_1_Conv2d_1a_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8ac180a50>\n",
"143 : Mixed_6a_Branch_0_Conv2d_1a_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac335ad0>\n",
"144 : Mixed_6a_Branch_1_Conv2d_1a_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac1e77d0>\n",
"145 : Mixed_6a_Branch_0_Conv2d_1a_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ac2cd550>\n",
"146 : Mixed_6a_Branch_1_Conv2d_1a_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8ac1b0450>\n",
"147 : Mixed_6a_Branch_2_MaxPool_1a_3x3, <keras.layers.pooling.MaxPooling2D object at 0x7fd8ac14b050>\n",
"148 : Mixed_6a, <keras.layers.merge.Concatenate object at 0x7fd8ac15fc10>\n",
"149 : Block17_1_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ac09b450>\n",
"150 : Block17_1_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac0ed890>\n",
"151 : Block17_1_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ac04c950>\n",
"152 : Block17_1_Branch_1_Conv2d_0b_1x7, <keras.layers.convolutional.Conv2D object at 0x7fd8ac017e10>\n",
"153 : Block17_1_Branch_1_Conv2d_0b_1x7_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8abffa610>\n",
"154 : Block17_1_Branch_1_Conv2d_0b_1x7_Activation, <keras.layers.core.Activation object at 0x7fd8abf79cd0>\n",
"155 : Block17_1_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ac125f90>\n",
"156 : Block17_1_Branch_1_Conv2d_0c_7x1, <keras.layers.convolutional.Conv2D object at 0x7fd8abf92050>\n",
"157 : Block17_1_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ac0bfc50>\n",
"158 : Block17_1_Branch_1_Conv2d_0c_7x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8abfa5810>\n",
"159 : Block17_1_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ac0ed6d0>\n",
"160 : Block17_1_Branch_1_Conv2d_0c_7x1_Activation, <keras.layers.core.Activation object at 0x7fd8abf57210>\n",
"161 : Block17_1_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8abeb9210>\n",
"162 : Block17_1_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8abed0bd0>\n",
"163 : lambda_6, <keras.layers.core.Lambda object at 0x7fd8abed0c10>\n",
"164 : add_6, <keras.layers.merge.Add object at 0x7fd8abee8e90>\n",
"165 : Block17_1_Activation, <keras.layers.core.Activation object at 0x7fd8abe9dcd0>\n",
"166 : Block17_2_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8abe5df90>\n",
"167 : Block17_2_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8abe4a250>\n",
"168 : Block17_2_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8abdbdfd0>\n",
"169 : Block17_2_Branch_1_Conv2d_0b_1x7, <keras.layers.convolutional.Conv2D object at 0x7fd8abdeab10>\n",
"170 : Block17_2_Branch_1_Conv2d_0b_1x7_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8abdd5890>\n",
"171 : Block17_2_Branch_1_Conv2d_0b_1x7_Activation, <keras.layers.core.Activation object at 0x7fd8abda0f50>\n",
"172 : Block17_2_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8abe9d210>\n",
"173 : Block17_2_Branch_1_Conv2d_0c_7x1, <keras.layers.convolutional.Conv2D object at 0x7fd8abcfc510>\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"174 : Block17_2_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8abeb1e50>\n",
"175 : Block17_2_Branch_1_Conv2d_0c_7x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8abd3e0d0>\n",
"176 : Block17_2_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8abeb1f10>\n",
"177 : Block17_2_Branch_1_Conv2d_0c_7x1_Activation, <keras.layers.core.Activation object at 0x7fd8abd2e390>\n",
"178 : Block17_2_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8abce06d0>\n",
"179 : Block17_2_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8abca78d0>\n",
"180 : lambda_7, <keras.layers.core.Lambda object at 0x7fd8abca7910>\n",
"181 : add_7, <keras.layers.merge.Add object at 0x7fd8abc5aed0>\n",
"182 : Block17_2_Activation, <keras.layers.core.Activation object at 0x7fd8abc759d0>\n",
"183 : Block17_3_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8abc34e10>\n",
"184 : Block17_3_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8abc1f1d0>\n",
"185 : Block17_3_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8abb97cd0>\n",
"186 : Block17_3_Branch_1_Conv2d_0b_1x7, <keras.layers.convolutional.Conv2D object at 0x7fd8abbb0050>\n",
"187 : Block17_3_Branch_1_Conv2d_0b_1x7_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8abb45810>\n",
"188 : Block17_3_Branch_1_Conv2d_0b_1x7_Activation, <keras.layers.core.Activation object at 0x7fd8abb75210>\n",
"189 : Block17_3_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8abc75d90>\n",
"190 : Block17_3_Branch_1_Conv2d_0c_7x1, <keras.layers.convolutional.Conv2D object at 0x7fd8abb0e0d0>\n",
"191 : Block17_3_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8abc07b50>\n",
"192 : Block17_3_Branch_1_Conv2d_0c_7x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8abad4210>\n",
"193 : Block17_3_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8abc07b90>\n",
"194 : Block17_3_Branch_1_Conv2d_0c_7x1_Activation, <keras.layers.core.Activation object at 0x7fd8aba87710>\n",
"195 : Block17_3_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8aba3c3d0>\n",
"196 : Block17_3_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aba005d0>\n",
"197 : lambda_8, <keras.layers.core.Lambda object at 0x7fd8aba00610>\n",
"198 : add_8, <keras.layers.merge.Add object at 0x7fd8aba32bd0>\n",
"199 : Block17_3_Activation, <keras.layers.core.Activation object at 0x7fd8ab9c96d0>\n",
"200 : Block17_4_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab989b10>\n",
"201 : Block17_4_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab9f4890>\n",
"202 : Block17_4_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab93df50>\n",
"203 : Block17_4_Branch_1_Conv2d_0b_1x7, <keras.layers.convolutional.Conv2D object at 0x7fd8ab91a510>\n",
"204 : Block17_4_Branch_1_Conv2d_0b_1x7_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab9590d0>\n",
"205 : Block17_4_Branch_1_Conv2d_0b_1x7_Activation, <keras.layers.core.Activation object at 0x7fd8ab8ce390>\n",
"206 : Block17_4_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab9c9fd0>\n",
"207 : Block17_4_Branch_1_Conv2d_0c_7x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab899ed0>\n",
"208 : Block17_4_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab9c9f90>\n",
"209 : Block17_4_Branch_1_Conv2d_0c_7x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab87d6d0>\n",
"210 : Block17_4_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab9df890>\n",
"211 : Block17_4_Branch_1_Conv2d_0c_7x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab875d90>\n",
"212 : Block17_4_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8ab811650>\n",
"213 : Block17_4_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab7d5e10>\n",
"214 : lambda_9, <keras.layers.core.Lambda object at 0x7fd8ab7d5310>\n",
"215 : add_9, <keras.layers.merge.Add object at 0x7fd8ab7878d0>\n",
"216 : Block17_4_Activation, <keras.layers.core.Activation object at 0x7fd8ab7a13d0>\n",
"217 : Block17_5_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab763810>\n",
"218 : Block17_5_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab7b5910>\n",
"219 : Block17_5_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab713210>\n",
"220 : Block17_5_Branch_1_Conv2d_0b_1x7, <keras.layers.convolutional.Conv2D object at 0x7fd8ab7300d0>\n",
"221 : Block17_5_Branch_1_Conv2d_0b_1x7_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab6f5210>\n",
"222 : Block17_5_Branch_1_Conv2d_0b_1x7_Activation, <keras.layers.core.Activation object at 0x7fd8ab6a6710>\n",
"223 : Block17_5_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab7a1c10>\n",
"224 : Block17_5_Branch_1_Conv2d_0c_7x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab66ebd0>\n",
"225 : Block17_5_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab7a1d50>\n",
"226 : Block17_5_Branch_1_Conv2d_0c_7x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab6593d0>\n",
"227 : Block17_5_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab7b5590>\n",
"228 : Block17_5_Branch_1_Conv2d_0c_7x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab5d2a90>\n",
"229 : Block17_5_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8ab5bb190>\n",
"230 : Block17_5_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab598f90>\n",
"231 : lambda_10, <keras.layers.core.Lambda object at 0x7fd8ab5aeb10>\n",
"232 : add_10, <keras.layers.merge.Add object at 0x7fd8ab547290>\n",
"233 : Block17_5_Activation, <keras.layers.core.Activation object at 0x7fd8ab55f5d0>\n",
"234 : Block17_6_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab4ba510>\n",
"235 : Block17_6_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab510610>\n",
"236 : Block17_6_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab4ec390>\n",
"237 : Block17_6_Branch_1_Conv2d_0b_1x7, <keras.layers.convolutional.Conv2D object at 0x7fd8ab4b3ed0>\n",
"238 : Block17_6_Branch_1_Conv2d_0b_1x7_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab49a690>\n",
"239 : Block17_6_Branch_1_Conv2d_0b_1x7_Activation, <keras.layers.core.Activation object at 0x7fd8ab415d90>\n",
"240 : Block17_6_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab4f9810>\n",
"241 : Block17_6_Branch_1_Conv2d_0c_7x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab3c58d0>\n",
"242 : Block17_6_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab4f9f10>\n",
"243 : Block17_6_Branch_1_Conv2d_0c_7x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab42f650>\n",
"244 : Block17_6_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab510290>\n",
"245 : Block17_6_Branch_1_Conv2d_0c_7x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab3f22d0>\n",
"246 : Block17_6_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8ab3562d0>\n",
"247 : Block17_6_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab36cc90>\n",
"248 : lambda_11, <keras.layers.core.Lambda object at 0x7fd8ab36ccd0>\n",
"249 : add_11, <keras.layers.merge.Add object at 0x7fd8ab307f50>\n",
"250 : Block17_6_Activation, <keras.layers.core.Activation object at 0x7fd8ab2bcd90>\n",
"251 : Block17_7_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab292210>\n",
"252 : Block17_7_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab2e6310>\n",
"253 : Block17_7_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab245710>\n",
"254 : Block17_7_Branch_1_Conv2d_0b_1x7, <keras.layers.convolutional.Conv2D object at 0x7fd8ab20bbd0>\n",
"255 : Block17_7_Branch_1_Conv2d_0b_1x7_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab2743d0>\n",
"256 : Block17_7_Branch_1_Conv2d_0b_1x7_Activation, <keras.layers.core.Activation object at 0x7fd8ab1efa90>\n",
"257 : Block17_7_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab2bc2d0>\n",
"258 : Block17_7_Branch_1_Conv2d_0c_7x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab1a05d0>\n",
"259 : Block17_7_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab2d1f10>\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"260 : Block17_7_Branch_1_Conv2d_0c_7x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab1d9190>\n",
"261 : Block17_7_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab2d1f50>\n",
"262 : Block17_7_Branch_1_Conv2d_0c_7x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab152450>\n",
"263 : Block17_7_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8ab103790>\n",
"264 : Block17_7_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab0ca990>\n",
"265 : lambda_12, <keras.layers.core.Lambda object at 0x7fd8ab0ca9d0>\n",
"266 : add_12, <keras.layers.merge.Add object at 0x7fd8ab07bf90>\n",
"267 : Block17_7_Activation, <keras.layers.core.Activation object at 0x7fd8ab092a90>\n",
"268 : Block17_8_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab051ed0>\n",
"269 : Block17_8_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab03e290>\n",
"270 : Block17_8_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab034d90>\n",
"271 : Block17_8_Branch_1_Conv2d_0b_1x7, <keras.layers.convolutional.Conv2D object at 0x7fd8aafe58d0>\n",
"272 : Block17_8_Branch_1_Conv2d_0b_1x7_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aafce650>\n",
"273 : Block17_8_Branch_1_Conv2d_0b_1x7_Activation, <keras.layers.core.Activation object at 0x7fd8aaf942d0>\n",
"274 : Block17_8_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ab092e50>\n",
"275 : Block17_8_Branch_1_Conv2d_0c_7x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aafae190>\n",
"276 : Block17_8_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8ab0a5c10>\n",
"277 : Block17_8_Branch_1_Conv2d_0c_7x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aaf732d0>\n",
"278 : Block17_8_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8ab0a5c50>\n",
"279 : Block17_8_Branch_1_Conv2d_0c_7x1_Activation, <keras.layers.core.Activation object at 0x7fd8aaf257d0>\n",
"280 : Block17_8_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8aaed8490>\n",
"281 : Block17_8_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aaea0690>\n",
"282 : lambda_13, <keras.layers.core.Lambda object at 0x7fd8aaea06d0>\n",
"283 : add_13, <keras.layers.merge.Add object at 0x7fd8aae53c90>\n",
"284 : Block17_8_Activation, <keras.layers.core.Activation object at 0x7fd8aae68790>\n",
"285 : Block17_9_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aae27bd0>\n",
"286 : Block17_9_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aae13950>\n",
"287 : Block17_9_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aad8ba90>\n",
"288 : Block17_9_Branch_1_Conv2d_0b_1x7, <keras.layers.convolutional.Conv2D object at 0x7fd8aad3b5d0>\n",
"289 : Block17_9_Branch_1_Conv2d_0b_1x7_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aad78190>\n",
"290 : Block17_9_Branch_1_Conv2d_0b_1x7_Activation, <keras.layers.core.Activation object at 0x7fd8aad6d450>\n",
"291 : Block17_9_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aae68890>\n",
"292 : Block17_9_Branch_1_Conv2d_0c_7x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aacb6f90>\n",
"293 : Block17_9_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aae689d0>\n",
"294 : Block17_9_Branch_1_Conv2d_0c_7x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aad1d790>\n",
"295 : Block17_9_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aadff950>\n",
"296 : Block17_9_Branch_1_Conv2d_0c_7x1_Activation, <keras.layers.core.Activation object at 0x7fd8aac96e50>\n",
"297 : Block17_9_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8aacb0710>\n",
"298 : Block17_9_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aabfaed0>\n",
"299 : lambda_14, <keras.layers.core.Lambda object at 0x7fd8aabfa3d0>\n",
"300 : add_14, <keras.layers.merge.Add object at 0x7fd8aac29990>\n",
"301 : Block17_9_Activation, <keras.layers.core.Activation object at 0x7fd8aabc4490>\n",
"302 : Block17_10_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aab848d0>\n",
"303 : Block17_10_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aabee650>\n",
"304 : Block17_10_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aabb42d0>\n",
"305 : Block17_10_Branch_1_Conv2d_0b_1x7, <keras.layers.convolutional.Conv2D object at 0x7fd8aab4e190>\n",
"306 : Block17_10_Branch_1_Conv2d_0b_1x7_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aab122d0>\n",
"307 : Block17_10_Branch_1_Conv2d_0b_1x7_Activation, <keras.layers.core.Activation object at 0x7fd8aaac57d0>\n",
"308 : Block17_10_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aabc4cd0>\n",
"309 : Block17_10_Branch_1_Conv2d_0c_7x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aaa8cc90>\n",
"310 : Block17_10_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aabc4e10>\n",
"311 : Block17_10_Branch_1_Conv2d_0c_7x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aaa77490>\n",
"312 : Block17_10_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aabd5650>\n",
"313 : Block17_10_Branch_1_Conv2d_0c_7x1_Activation, <keras.layers.core.Activation object at 0x7fd8aaa70b50>\n",
"314 : Block17_10_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8aaa57210>\n",
"315 : Block17_10_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aaa1d810>\n",
"316 : lambda_15, <keras.layers.core.Lambda object at 0x7fd8aa9cebd0>\n",
"317 : add_15, <keras.layers.merge.Add object at 0x7fd8aa9e9350>\n",
"318 : Block17_10_Activation, <keras.layers.core.Activation object at 0x7fd8aa97f690>\n",
"319 : Mixed_7a_Branch_2_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa82b1d0>\n",
"320 : Mixed_7a_Branch_2_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa7ee310>\n",
"321 : Mixed_7a_Branch_2_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aa7a1810>\n",
"322 : Mixed_7a_Branch_0_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8ac125e10>\n",
"323 : Mixed_7a_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa8d1f10>\n",
"324 : Mixed_7a_Branch_2_Conv2d_0b_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8aa769cd0>\n",
"325 : Mixed_7a_Branch_0_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa997990>\n",
"326 : Mixed_7a_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa8b8710>\n",
"327 : Mixed_7a_Branch_2_Conv2d_0b_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa7544d0>\n",
"328 : Mixed_7a_Branch_0_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aa9ae410>\n",
"329 : Mixed_7a_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aa8b4dd0>\n",
"330 : Mixed_7a_Branch_2_Conv2d_0b_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8aa6cab90>\n",
"331 : Mixed_7a_Branch_0_Conv2d_1a_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8aa955550>\n",
"332 : Mixed_7a_Branch_1_Conv2d_1a_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8aa860910>\n",
"333 : Mixed_7a_Branch_2_Conv2d_1a_3x3, <keras.layers.convolutional.Conv2D object at 0x7fd8aa6b6290>\n",
"334 : Mixed_7a_Branch_0_Conv2d_1a_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa9ae390>\n",
"335 : Mixed_7a_Branch_1_Conv2d_1a_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa84c690>\n",
"336 : Mixed_7a_Branch_2_Conv2d_1a_3x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa6b6250>\n",
"337 : Mixed_7a_Branch_0_Conv2d_1a_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8aa9063d0>\n",
"338 : Mixed_7a_Branch_1_Conv2d_1a_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8aa811310>\n",
"339 : Mixed_7a_Branch_2_Conv2d_1a_3x3_Activation, <keras.layers.core.Activation object at 0x7fd8aa659f10>\n",
"340 : Mixed_7a_Branch_3_MaxPool_1a_3x3, <keras.layers.pooling.MaxPooling2D object at 0x7fd8aa670f10>\n",
"341 : Mixed_7a, <keras.layers.merge.Concatenate object at 0x7fd8aa6a7c10>\n",
"342 : Block8_1_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa581f10>\n",
"343 : Block8_1_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa5eb510>\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"344 : Block8_1_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aa560f50>\n",
"345 : Block8_1_Branch_1_Conv2d_0b_1x3, <keras.layers.convolutional.Conv2D object at 0x7fd8aa512a90>\n",
"346 : Block8_1_Branch_1_Conv2d_0b_1x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa4f9810>\n",
"347 : Block8_1_Branch_1_Conv2d_0b_1x3_Activation, <keras.layers.core.Activation object at 0x7fd8aa4c2490>\n",
"348 : Block8_1_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa620ad0>\n",
"349 : Block8_1_Branch_1_Conv2d_0c_3x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa4de050>\n",
"350 : Block8_1_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa5bccd0>\n",
"351 : Block8_1_Branch_1_Conv2d_0c_3x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa49f490>\n",
"352 : Block8_1_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aa5d1f50>\n",
"353 : Block8_1_Branch_1_Conv2d_0c_3x1_Activation, <keras.layers.core.Activation object at 0x7fd8aa451990>\n",
"354 : Block8_1_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8aa400650>\n",
"355 : Block8_1_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa400bd0>\n",
"356 : lambda_16, <keras.layers.core.Lambda object at 0x7fd8aa3ce850>\n",
"357 : add_16, <keras.layers.merge.Add object at 0x7fd8aa3e7b10>\n",
"358 : Block8_1_Activation, <keras.layers.core.Activation object at 0x7fd8aa3e7a90>\n",
"359 : Block8_2_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa357d90>\n",
"360 : Block8_2_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa341550>\n",
"361 : Block8_2_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aa2bbc50>\n",
"362 : Block8_2_Branch_1_Conv2d_0b_1x3, <keras.layers.convolutional.Conv2D object at 0x7fd8aa2ec790>\n",
"363 : Block8_2_Branch_1_Conv2d_0b_1x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa327350>\n",
"364 : Block8_2_Branch_1_Conv2d_0b_1x3_Activation, <keras.layers.core.Activation object at 0x7fd8aa24afd0>\n",
"365 : Block8_2_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa3963d0>\n",
"366 : Block8_2_Branch_1_Conv2d_0c_3x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa264b10>\n",
"367 : Block8_2_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa396a50>\n",
"368 : Block8_2_Branch_1_Conv2d_0c_3x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa29bd10>\n",
"369 : Block8_2_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aa3aa990>\n",
"370 : Block8_2_Branch_1_Conv2d_0c_3x1_Activation, <keras.layers.core.Activation object at 0x7fd8aa22d690>\n",
"371 : Block8_2_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8aa1de350>\n",
"372 : Block8_2_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa1de8d0>\n",
"373 : lambda_17, <keras.layers.core.Lambda object at 0x7fd8aa1a7550>\n",
"374 : add_17, <keras.layers.merge.Add object at 0x7fd8aa143810>\n",
"375 : Block8_2_Activation, <keras.layers.core.Activation object at 0x7fd8aa143790>\n",
"376 : Block8_3_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa12ea90>\n",
"377 : Block8_3_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa117810>\n",
"378 : Block8_3_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aa0df490>\n",
"379 : Block8_3_Branch_1_Conv2d_0b_1x3, <keras.layers.convolutional.Conv2D object at 0x7fd8aa07d050>\n",
"380 : Block8_3_Branch_1_Conv2d_0b_1x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa040490>\n",
"381 : Block8_3_Branch_1_Conv2d_0b_1x3_Activation, <keras.layers.core.Activation object at 0x7fd8aa073990>\n",
"382 : Block8_3_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa16e0d0>\n",
"383 : Block8_3_Branch_1_Conv2d_0c_3x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a9fb9cd0>\n",
"384 : Block8_3_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa16ef10>\n",
"385 : Block8_3_Branch_1_Conv2d_0c_3x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8aa020650>\n",
"386 : Block8_3_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8aa101690>\n",
"387 : Block8_3_Branch_1_Conv2d_0c_3x1_Activation, <keras.layers.core.Activation object at 0x7fd8a9f9bd10>\n",
"388 : Block8_3_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8a9f375d0>\n",
"389 : Block8_3_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a9f370d0>\n",
"390 : lambda_18, <keras.layers.core.Lambda object at 0x7fd8a9efbd90>\n",
"391 : add_18, <keras.layers.merge.Add object at 0x7fd8a9f16510>\n",
"392 : Block8_3_Activation, <keras.layers.core.Activation object at 0x7fd8a9f16110>\n",
"393 : Block8_4_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a9e85790>\n",
"394 : Block8_4_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a9edc910>\n",
"395 : Block8_4_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8a9e63fd0>\n",
"396 : Block8_4_Branch_1_Conv2d_0b_1x3, <keras.layers.convolutional.Conv2D object at 0x7fd8a9dffb10>\n",
"397 : Block8_4_Branch_1_Conv2d_0b_1x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a9e39d10>\n",
"398 : Block8_4_Branch_1_Conv2d_0b_1x3_Activation, <keras.layers.core.Activation object at 0x7fd8a9dc6690>\n",
"399 : Block8_4_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a9ec78d0>\n",
"400 : Block8_4_Branch_1_Conv2d_0c_3x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a9d90b50>\n",
"401 : Block8_4_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a9ec7f10>\n",
"402 : Block8_4_Branch_1_Conv2d_0c_3x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a9d79350>\n",
"403 : Block8_4_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8a9edc390>\n",
"404 : Block8_4_Branch_1_Conv2d_0c_3x1_Activation, <keras.layers.core.Activation object at 0x7fd8a9d43f90>\n",
"405 : Block8_4_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8a9d5c110>\n",
"406 : Block8_4_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a9d5c090>\n",
"407 : lambda_19, <keras.layers.core.Lambda object at 0x7fd8a9cbbf10>\n",
"408 : add_19, <keras.layers.merge.Add object at 0x7fd8a9cea210>\n",
"409 : Block8_4_Activation, <keras.layers.core.Activation object at 0x7fd8a9c81c50>\n",
"410 : Block8_5_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a9c5a490>\n",
"411 : Block8_5_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a9cad610>\n",
"412 : Block8_5_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8a9c0d990>\n",
"413 : Block8_5_Branch_1_Conv2d_0b_1x3, <keras.layers.convolutional.Conv2D object at 0x7fd8a9bd4cd0>\n",
"414 : Block8_5_Branch_1_Conv2d_0b_1x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a9bbc650>\n",
"415 : Block8_5_Branch_1_Conv2d_0b_1x3_Activation, <keras.layers.core.Activation object at 0x7fd8a9b37d10>\n",
"416 : Block8_5_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a9cd1b90>\n",
"417 : Block8_5_Branch_1_Conv2d_0c_3x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a9b67850>\n",
"418 : Block8_5_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a9c98c10>\n",
"419 : Block8_5_Branch_1_Conv2d_0c_3x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a9b525d0>\n",
"420 : Block8_5_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8a9cad090>\n",
"421 : Block8_5_Branch_1_Conv2d_0c_3x1_Activation, <keras.layers.core.Activation object at 0x7fd8a9b19250>\n",
"422 : Block8_5_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8a9a77250>\n",
"423 : Block8_5_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a9ac7a10>\n",
"424 : lambda_20, <keras.layers.core.Lambda object at 0x7fd8a9a90c10>\n",
"425 : add_20, <keras.layers.merge.Add object at 0x7fd8a9aaaed0>\n",
"426 : Block8_5_Activation, <keras.layers.core.Activation object at 0x7fd8a9aaae50>\n",
"427 : Block8_6_Branch_1_Conv2d_0a_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a9a1af90>\n",
"428 : Block8_6_Branch_1_Conv2d_0a_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a9a080d0>\n",
"429 : Block8_6_Branch_1_Conv2d_0a_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8a997efd0>\n",
"430 : Block8_6_Branch_1_Conv2d_0b_1x3, <keras.layers.convolutional.Conv2D object at 0x7fd8a99acb10>\n",
"431 : Block8_6_Branch_1_Conv2d_0b_1x3_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a9996890>\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"432 : Block8_6_Branch_1_Conv2d_0b_1x3_Activation, <keras.layers.core.Activation object at 0x7fd8a995df50>\n",
"433 : Block8_6_Branch_0_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8aa620a90>\n",
"434 : Block8_6_Branch_1_Conv2d_0c_3x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a98be510>\n",
"435 : Block8_6_Branch_0_Conv2d_1x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a9a5bb50>\n",
"436 : Block8_6_Branch_1_Conv2d_0c_3x1_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a98f80d0>\n",
"437 : Block8_6_Branch_0_Conv2d_1x1_Activation, <keras.layers.core.Activation object at 0x7fd8a9a71d50>\n",
"438 : Block8_6_Branch_1_Conv2d_0c_3x1_Activation, <keras.layers.core.Activation object at 0x7fd8a98ef390>\n",
"439 : Block8_6_Concatenate, <keras.layers.merge.Concatenate object at 0x7fd8a98a36d0>\n",
"440 : Block8_6_Conv2d_1x1, <keras.layers.convolutional.Conv2D object at 0x7fd8a98a3c50>\n",
"441 : lambda_21, <keras.layers.core.Lambda object at 0x7fd8a98698d0>\n",
"442 : add_21, <keras.layers.merge.Add object at 0x7fd8a9801b90>\n",
"443 : AvgPool, <keras.layers.pooling.GlobalAveragePooling2D object at 0x7fd8a9801b10>\n",
"444 : Bottleneck, <keras.layers.core.Dense object at 0x7fd8a97cba90>\n",
"445 : Bottleneck_BatchNorm, <keras.layers.normalization.BatchNormalization object at 0x7fd8a97e23d0>\n"
]
}
],
"source": [
"import coremltools\n",
"\n",
"coreml_model = coremltools.converters.keras.convert(\n",
" model,\n",
" input_names=\"image\",\n",
" image_input_names=\"image\",\n",
" output_names=\"output\",\n",
" add_custom_layers=True,\n",
" custom_conversion_functions={ \"Lambda\": convert_lambda })"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "python2.7",
"language": "python",
"name": "python2.7"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.14"
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"from sklearn.externals import joblib\n",
"from skimage.transform import resize\n",
"from sklearn.preprocessing import LabelEncoder\n",
"from keras.models import model_from_json\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 載入人臉偵測cascade分類器\n",
"face_cascade = cv2.CascadeClassifier(\"../model/cv2/haarcascade_frontalface_alt2.xml\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 載入Facenet預測模型\n",
"model=model_from_json(open(\"../model/keras/facenet_model.json\",\"r\").read())\n",
"model.load_weights(\"../model/keras/facenet_weights.h5\")\n",
"model.summary()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 載入SVM分類器\n",
"clf=joblib.load('../model/20190618224324/20190618224324.pkl')\n",
"# 載入LabelEncoder\n",
"le=LabelEncoder()\n",
"le.classes_ =np.load('../model/20190618224324/classes.npy')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 影像預處理\n",
"def prewhiten(x):\n",
" if x.ndim == 4:\n",
" axis = (1, 2, 3)\n",
" size = x[0].size\n",
" elif x.ndim == 3:\n",
" axis = (0, 1, 2)\n",
" size = x.size\n",
" else:\n",
" print(x.ndim)\n",
" raise ValueError('Dimension should be 3 or 4')\n",
"\n",
" mean = np.mean(x, axis=axis, keepdims=True)\n",
" std = np.std(x, axis=axis, keepdims=True)\n",
" std_adj = np.maximum(std, 1.0/np.sqrt(size))\n",
" y = (x - mean) / std_adj\n",
" return y\n",
"\n",
"def l2_normalize(x, axis=-1, epsilon=1e-10):\n",
" output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))\n",
" return output"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 人臉偵測處理回傳結果矩陣\n",
"image_size=160\n",
"def face_cropped(img,faces, margin): \n",
" aligned_images = []\n",
" for f in faces:\n",
" (x, y, w, h) = f\n",
" cropped = img[y-margin//2:y+h+margin//2,x-margin//2:x+w+margin//2, :]\n",
" aligned = resize(cropped, (image_size, image_size), mode='reflect')\n",
" aligned_images.append(aligned)\n",
" \n",
" return np.array(aligned_images)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 取得人臉Facenet預測之特徵值\n",
"def calc_embs(faces, margin=10, batch_size=1):\n",
" aligned_images = prewhiten(faces)\n",
" pd = []\n",
" for start in range(0, len(aligned_images), batch_size):\n",
" pd.append(model.predict_on_batch(aligned_images[start:start+batch_size]))\n",
" embs = l2_normalize(np.concatenate(pd))\n",
"\n",
" return embs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 人臉辨識推斷\n",
"def infer(le, clf, img):\n",
" faces = face_cascade.detectMultiScale(img,scaleFactor=1.1,minNeighbors=3)\n",
" if(len(faces)==0):\n",
" return '偵測不到人臉請重新調整'\n",
" \n",
" embs = calc_embs(face_cropped(img,faces,10))\n",
"# pred = le.inverse_transform(clf.predict(embs))\n",
" pred=get_labels(le,clf,embs)\n",
" return [faces,pred]\n",
"# Labels 解析\n",
"def get_labels(le,clf,embs):\n",
    "    scores=clf.predict_proba(embs)\n",
    "    print(scores)\n",
    "    results=[]\n",
    "    for s in scores:\n",
    "        if(s[s.argmax()]>0.5):\n",
    "            results.append(le.inverse_transform([s.argmax()])[0])\n",
    "        else:\n",
    "            results.append('Unknown')\n",
" return results"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"img=cv2.imread('../data/Test/Kevin04.jpg')\n",
"# img2=cv2.imread('../data/Test/test6.jpg')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"result=infer(le,clf,img)\n",
"# result2=infer(le,clf,img2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 顯示結果\n",
"if(len(result)==2): \n",
" fa=result[0]\n",
" na=result[1]\n",
" for f,n in zip(fa,na):\n",
" x,y,w,h=f\n",
" margin=10\n",
" cv2.rectangle(img,(x-margin//2,y-margin//2),(x+w+margin//2,y+h+margin//2),(0,0,255))\n",
" cv2.putText(img, n, (x-margin//2,y-margin//2), cv2.FONT_HERSHEY_PLAIN,1, (0, 255, 255), 1, cv2.LINE_AA)\n",
" cv2.imshow('frame',img)\n",
" cv2.waitKey(0)\n",
" cv2.destroyAllWindows()\n",
"else:\n",
" print(result)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"faces = face_cascade.detectMultiScale(img,scaleFactor=1.1,minNeighbors=3)\n",
"if(len(faces)!=0):\n",
    "    embs = calc_embs(face_cropped(img,faces,10))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
    "score=clf.predict_proba(embs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"get_labels(le,clf,embs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fa=result1[0]\n",
"na=result1[1]\n",
"print(f,n)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for f,n in zip(fa,na):\n",
" print(str(f) + n)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import cv2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cascade_path = '../model/cv2/haarcascade_frontalface_alt2.xml'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"face_cascade=cv2.CascadeClassifier(cascade_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"img=cv2.imread(\"../data/images/Yutanek/Yutanek03.jpg\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"margin=10"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"x,y,w,h=face_cascade.detectMultiScale(img)[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"result=cv2.rectangle(img,(x-margin//2,y-margin//2),(x+w+margin//2,y+h+margin//2),(0,0,255))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cv2.imshow(\"frame\",img[y:y+h,x:x+w])\n",
"cv2.imshow(\"result\",result)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Real-time face detection"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cap=cv2.VideoCapture(0)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"while(cap.isOpened()):\n",
" ret,frame=cap.read()\n",
" if(ret):\n",
" result=face_cascade.detectMultiScale(frame)\n",
" if(len(result)==0):\n",
" continue\n",
" x,y,w,h=result[0]\n",
" cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255))\n",
" cv2.imshow(\"frame\",frame)\n",
" if(cv2.waitKey(33)==27):\n",
" break\n",
"cap.release()\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# SVM People Training"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"from sklearn.svm import SVC\n",
"from sklearn.preprocessing import LabelEncoder\n",
"from skimage.transform import resize\n",
"import os\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cascade_path = '../model/cv2/haarcascade_frontalface_alt2.xml'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"image_dir_basepath=\"../data/people/train/\"\n",
"image_size = 160\n",
"names=[]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for f in os.listdir(os.path.abspath(image_dir_basepath)):\n",
" if(os.path.isdir(os.path.join(os.path.abspath(image_dir_basepath),f))):\n",
" names.append(f)\n",
"print(names)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def prewhiten(x):\n",
" if x.ndim == 4:\n",
" axis = (1, 2, 3)\n",
" size = x[0].size\n",
" elif x.ndim == 3:\n",
" axis = (0, 1, 2)\n",
" size = x.size\n",
" else:\n",
" print(x.ndim)\n",
" raise ValueError('Dimension should be 3 or 4')\n",
"\n",
" mean = np.mean(x, axis=axis, keepdims=True)\n",
" std = np.std(x, axis=axis, keepdims=True)\n",
" std_adj = np.maximum(std, 1.0/np.sqrt(size))\n",
" y = (x - mean) / std_adj\n",
" return y\n",
"\n",
"def l2_normalize(x, axis=-1, epsilon=1e-10):\n",
" output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))\n",
" return output"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def load_and_align_images(filepaths, margin):\n",
" cascade = cv2.CascadeClassifier(cascade_path)\n",
" \n",
" aligned_images = []\n",
" for filepath in filepaths:\n",
" img = cv2.imread(filepath)\n",
"\n",
" faces = cascade.detectMultiScale(img,\n",
" scaleFactor=1.1,\n",
" minNeighbors=3)\n",
" if(len(faces)==0):\n",
" continue\n",
" (x, y, w, h) = faces[0]\n",
" cropped = img[y-margin//2:y+h+margin//2,\n",
" x-margin//2:x+w+margin//2, :]\n",
" aligned = resize(cropped, (image_size, image_size), mode='reflect')\n",
" aligned_images.append(aligned)\n",
" \n",
" return np.array(aligned_images)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def calc_embs(filepaths, margin=10, batch_size=1):\n",
" print(load_and_align_images(filepaths, margin))\n",
" aligned_images = prewhiten(load_and_align_images(filepaths, margin))\n",
" pd = []\n",
" for start in range(0, len(aligned_images), batch_size):\n",
" pd.append(model.predict_on_batch(aligned_images[start:start+batch_size]))\n",
" embs = l2_normalize(np.concatenate(pd))\n",
"\n",
" return embs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def train(dir_basepath, names, max_num_img=10):\n",
" labels = []\n",
" embs = []\n",
" for name in names:\n",
" dirpath = os.path.abspath(dir_basepath + name)\n",
" filepaths = [os.path.join(dirpath, f) for f in os.listdir(dirpath)][:max_num_img]\n",
" embs_ = calc_embs(filepaths) \n",
" labels.extend([name] * len(embs_))\n",
" embs.append(embs_)\n",
" \n",
" embs = np.concatenate(embs)\n",
" le = LabelEncoder().fit(labels)\n",
" y = le.transform(labels)\n",
" clf = SVC(kernel='linear', probability=True).fit(embs, y)\n",
" return le, clf"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def infer(le, clf, filepaths):\n",
" embs = calc_embs(filepaths)\n",
" pred = le.inverse_transform(clf.predict(embs))\n",
" return embs,pred"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"le, clf = train(image_dir_basepath, names)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_dirpath = os.path.abspath(\"../data/images/Test/\")\n",
"test_filepaths = [os.path.join(test_dirpath, f) for f in os.listdir(test_dirpath)]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"embs_,pred_ = infer(le, clf, test_filepaths)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pred_"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Something Test"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from keras.models import model_from_json"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model=model_from_json(open(\"../model/keras/facenet_model.json\",\"r\").read())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.load_weights(\"../model/keras/facenet_weights.h5\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.summary()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.predict(img)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def load_and_align_images(filepaths, margin):\n",
" cascade = cv2.CascadeClassifier(cascade_path)\n",
" \n",
" aligned_images = []\n",
" for filepath in filepaths:\n",
" img = cv2.imread(filepath)\n",
"\n",
" faces = cascade.detectMultiScale(img,\n",
" scaleFactor=1.1,\n",
" minNeighbors=3)\n",
" (x, y, w, h) = faces[0]\n",
" cropped = img[y-margin//2:y+h+margin//2,\n",
" x-margin//2:x+w+margin//2, :]\n",
" aligned = resize(cropped, (image_size, image_size), mode='reflect')\n",
" aligned_images.append(aligned)\n",
" \n",
" return np.array(aligned_images)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"margin=10\n",
"cascade = cv2.CascadeClassifier(cascade_path)\n",
"img = cv2.imread(\"../data/images/BillGates/Bill_Gates_0003.jpg\")\n",
"faces = cascade.detectMultiScale(img,scaleFactor=1.1,minNeighbors=3)\n",
"(x, y, w, h) = faces[0]\n",
"cropped = img[y-margin//2:y+h+margin//2,x-margin//2:x+w+margin//2, :]\n",
"aligned = resize(cropped, (image_size, image_size), mode='reflect')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aligned_images = prewhiten(aligned)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cv2.imshow(\"test\",aligned_images)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
This source diff could not be displayed because it is too large. You can view the blob instead.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment