Slide 1


C-LIS CO., LTD.

Slide 2

Keiji ARIYAMA (有山圭二), C-LIS CO., LTD.
Photo: Koji MORIGUCHI (MORIGCHOWDER)
"Android app development? I can do it a bit."
"I'm just someone doing machine learning as a hobby." (Not really.)

Slide 3

https://knowledge.sakura.ad.jp/

Slide 4

TensorFlow User Group KANSAI at Sansan株式会社
"A story of migrating to TensorFlow 2.0"
"Building a hobby image-collection server with TensorFlow" (magazine article, … issue)

Slide 5

TensorFlow is a very good framework.

Slide 6

"...is deprecated and will be removed in a future version"

conv2d (from tensorflow.python.layers.convolutional) is deprecated and will be removed in a future version. Instructions for updating: Use `tf.keras.layers.Conv2D` instead.
flatten (from tensorflow.python.layers.core) is deprecated and will be removed in a future version. Instructions for updating: Use keras.layers.flatten instead.
dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version. Instructions for updating: Use keras.layers.dense instead.
dropout (from tensorflow.python.layers.core) is deprecated and will be removed in a future version. Instructions for updating: Use keras.layers.dropout instead.

Slide 7

tf.layers:

def layers(tag_name, image, training=False, reuse=None):
    scope = '%s/%s' % (tag_name, MODEL_NAME)
    with tf.variable_scope(scope, reuse=reuse):
        conv = tf.layers.conv2d(image, BASE_CHANNEL, [3, 3], [1, 1],
                                padding='SAME', activation=tf.nn.relu,
                                use_bias=True, name='conv')

Slide 8

tf.keras.layers:

def layers(tag_name, image, training=False, reuse=None):
    scope = '%s/%s' % (tag_name, MODEL_NAME)
    with tf.variable_scope(scope, reuse=reuse):
        conv = tf.keras.layers.Conv2D(BASE_CHANNEL, [3, 3], [1, 1],
                                      padding='SAME', activation=tf.nn.relu,
                                      use_bias=True, name='conv')(image)

Slide 9

https://twitter.com/keiji_ariyama/status/…

Slide 10

https://www.facebook.com/groups/…/permalink/…

Slide 11


Slide 12

An angry GitHub issue: "tf.keras.layers (in TF 1.13.1): variable_scope does not work"
https://github.com/tensorflow/tensorflow/issues/…

Slide 13

tf.variable_scope:

def layers(tag_name, image, training=False, reuse=None):
    scope = '%s/%s' % (tag_name, MODEL_NAME)
    with tf.variable_scope(scope, reuse=reuse):
        conv = tf.keras.layers.Conv2D(BASE_CHANNEL, [3, 3], [1, 1],
                                      padding='SAME', activation=tf.nn.relu,
                                      use_bias=True, name='conv')(image)

Slide 14

Reusing the model:

def _build_train_graph(train_path_list):
    image_batch, label_batch = _load_dataset(train_path_list, is_train=True)
    logits = model.layers('megane', image_batch, training=True)
    train_op = _optimize(logits, label_batch)
    return train_op

def _build_eval_graph(eval_path_list):
    image_batch, label_batch = _load_dataset(eval_path_list, is_train=False)
    logits = model.layers('megane', image_batch, reuse=True)
    accuracy = _calc_accuracy(logits, label_batch)
    return accuracy
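For comparison: in tf.keras, weights are shared by calling the same layer or model object on both inputs, not through variable_scope and reuse=True. A minimal sketch of the same train/eval reuse in that style (an assumption about how the code could be restructured, not the deck's actual code; the stand-in batches are hypothetical):

import tensorflow as tf

# Variables belong to the layer objects, so building the model once
# and calling it on both batches shares the weights automatically.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu',
                           name='conv'),
])

train_images = tf.zeros((8, 256, 256, 3))  # stand-in batch
eval_images = tf.zeros((8, 256, 256, 3))   # stand-in batch

train_logits = model(train_images, training=True)
eval_logits = model(eval_images, training=False)  # same weights reused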

Slide 15

TensorFlow 2.0

Slide 16

Dataset API:

LABEL_BYTE = 1
IMAGE_BYTES = 32 * 32 * 3
RECORD_BYTES = LABEL_BYTE + IMAGE_BYTES

def _load_dataset(data_path_list):
    def _process_record(record):
        value = tf.io.decode_raw(record, tf.uint8)
        label = value[0]
        image = value[1:]
        image = tf.reshape(image, (3, 32, 32))
        image = tf.transpose(image, (1, 2, 0))
        image = tf.cast(image, tf.float32)
        image = image / 255
        return image, label

    dataset = tf.data.FixedLengthRecordDataset(data_path_list, RECORD_BYTES)
    return dataset.map(_process_record)
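The record layout above (1 label byte followed by 32x32x3 image bytes) matches the CIFAR-10 binary layout. A dataset built this way can be consumed directly in eager mode; a small usage sketch (the file path is a hypothetical example):

# Batch the dataset and pull one batch to sanity-check shapes.
dataset = _load_dataset(['./cifar-10-batches-bin/data_batch_1.bin']).batch(32)
for images, labels in dataset.take(1):
    print(images.shape, labels.shape)  # (32, 32, 32, 3) (32,)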

Slide 17

Defining the model:

from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, Dense, Flatten

class MyModel(Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = Conv2D(32, 3, activation='relu')
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.d2 = Dense(10, activation='softmax')

    def call(self, x):
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)

# Create an instance of the model
model = MyModel()

Slide 18

Loss function and optimizer:

loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()

Slide 19

Training and evaluation:

train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss(loss)
    train_accuracy(labels, predictions)

Slide 20

Training and evaluation:

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')

@tf.function
def test_step(images, labels):
    predictions = model(images)
    t_loss = loss_object(labels, predictions)

    test_loss(t_loss)
    test_accuracy(labels, predictions)

Slide 21

Training and evaluation:

import time

EPOCHS = 5
SUMMARY_DIR = './summary'
TRAIN_BATCH_SIZE = 32
TEST_BATCH_SIZE = 32

train_dataset = _load_dataset(train_path_list).batch(TRAIN_BATCH_SIZE)
test_dataset = _load_dataset(test_path_list).batch(TEST_BATCH_SIZE)

Slide 22

for epoch in range(EPOCHS):
    for images, labels in train_dataset:
        train_step(images, labels)

    for test_images, test_labels in test_dataset:
        test_step(test_images, test_labels)

    template = 'Epoch {}, Train Loss: {}, Train Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch + 1,
                          train_loss.result(),
                          train_accuracy.result() * 100,
                          test_loss.result(),
                          test_accuracy.result() * 100))

    # Reset the metrics for the next epoch
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()
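Slide 21 defines SUMMARY_DIR, but the loop shown here never writes summaries. In TensorFlow 2.0 the metric results could be logged for TensorBoard with tf.summary; a sketch under that assumption (the scalar tags are hypothetical):

# Create the writer once, outside the epoch loop.
writer = tf.summary.create_file_writer(SUMMARY_DIR)

# At the end of each epoch, before the reset_states() calls,
# record the metric results for TensorBoard.
with writer.as_default():
    tf.summary.scalar('train_loss', train_loss.result(), step=epoch)
    tf.summary.scalar('train_accuracy', train_accuracy.result(), step=epoch)
    tf.summary.scalar('test_loss', test_loss.result(), step=epoch)
    tf.summary.scalar('test_accuracy', test_accuracy.result(), step=epoch)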

Slide 23

Quantization-aware Training

Slide 24

Quantizing the model (parameters): converting 32-bit floating-point numbers to 8-bit integers (quantization).
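As a concrete illustration of what this conversion means numerically, here is a minimal affine-quantization sketch in plain NumPy (an illustration of the general scheme only; TensorFlow Lite's actual implementation has more machinery, such as per-axis scales):

import numpy as np

def quantize(x, num_bits=8):
    # Map float values onto the signed integer range using a scale
    # and zero point: q = round(x / scale) + zero_point.
    qmin, qmax = -2 ** (num_bits - 1), 2 ** (num_bits - 1) - 1
    scale = (x.max() - x.min()) / (qmax - qmin)
    zero_point = int(round(qmin - x.min() / scale))
    q = np.clip(np.round(x / scale) + zero_point, qmin, qmax)
    return q.astype(np.int8), scale, zero_point

def dequantize(q, scale, zero_point):
    # Recover approximate floats; the error is at most about scale / 2.
    return scale * (q.astype(np.float32) - zero_point)

weights = np.array([-1.2, 0.0, 0.7, 2.5], dtype=np.float32)
q, scale, zp = quantize(weights)
print(q, dequantize(q, scale, zp))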

Slide 25

Quantization-aware Training:

g = tf.get_default_graph()
tf.contrib.quantize.create_training_graph(input_graph=g,
                                          quant_delay=100000)

• Use it for fine-tuning
• Set quant_delay carefully
• Because the graph is rewritten, parameter-file (checkpoint) compatibility is lost
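For completeness: the rewrite above targets the training graph; tf.contrib.quantize also provides an inference-side counterpart that is applied before exporting the model. A sketch of that step, based on the tf.contrib.quantize API in TF 1.x (not code shown in the deck):

import tensorflow as tf  # TF 1.x, where tf.contrib is available

# Rewrite the inference graph with fake-quantization nodes so the
# exported graph matches what quantization-aware training produced.
g = tf.get_default_graph()
tf.contrib.quantize.create_eval_graph(input_graph=g)
# The graph is then frozen and handed to the TensorFlow Lite converter.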

Slide 26

Quantization-aware Training:

• Use it for fine-tuning
• Set quant_delay carefully
• Because the graph is rewritten, parameter-file (checkpoint) compatibility is lost

[Comparison images] With quantization-aware training (quant_delay = MAX_VALUE) / with quantization-aware training (quant_delay = 0) / without quantization-aware training

Slide 27

https://twitter.com/keiji_ariyama/status/…

Slide 28

From the TensorFlow Lite documentation:

The graph-rewriting functions that supported quantization-aware training do not support models built with TensorFlow 2.0. The TensorFlow Lite quantization API is also in the middle of being rebuilt and streamlined toward supporting quantization-aware training through the Keras API. Until the new quantization API launches, these attributes will be removed from the 2.0 API. If you want to convert a model with the rewriting functions, use tf.compat.v1.TFLiteConverter.

https://www.tensorflow.org/lite/convert/python_api#quantization-aware_training
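A minimal sketch of that suggested fallback, using the v1 converter exposed in TF 2.0 (the file name and tensor names are hypothetical placeholders):

import tensorflow as tf

# Convert a frozen, quantization-aware graph with the TF 1.x converter
# that remains available under tf.compat.v1.
converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
    'frozen_eval_graph.pb',      # hypothetical frozen-graph file
    input_arrays=['input'],      # hypothetical input tensor name
    output_arrays=['logits'])    # hypothetical output tensor name
tflite_model = converter.convert()

with open('model.tflite', 'wb') as f:
    f.write(tflite_model)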

Slide 29

Dear Google: if you're going to make it 2.0, at least keep the features that were in 1.x usable.

Slide 30

"Building a hobby image-collection server with TensorFlow" (magazine article, … issue)

Slide 31

Demo

Slide 32

Overview: [architecture diagram] Data-source services feed data collection; a dataset-management server (MeganeCo Playground: Web interface, database) manages the collected data and generates training data (TFRecord); the machine-learning infrastructure (storage server: 6TB HDD, 256GB SSD; quad GPUs: Radeon RX Vega 64 x 2) trains models; trained models serve inference to an Android client.

Slide 33

Machine-learning infrastructure: quad GPUs (Radeon RX Vega 64 x 2); Sakura VPS (2G) as the Kubernetes master; Argo workflow.

Slide 34

High-power computing server (testbed):
CPU: Xeon E… v…, … cores × …
Memory: …GB
Storage: SSD …GB × … (RAID …)
GPU: NVIDIA TITAN X (…GB)
     NVIDIA TITAN X (…GB)
     NVIDIA GeForce GTX …Ti (…GB)
     NVIDIA GeForce GTX …Ti (…GB)
     (Driver Version: 410.48)

Slide 35

High-power computing server (testbed):
CPU: Xeon E… v…, … cores × …
Memory: …GB
Storage: SSD …GB × … (RAID …)
GPU: AMD Radeon Vega … (…GB)
     AMD Radeon Vega … (…GB)

Slide 36


Slide 37

Label           input_size  Positive samples  Negative samples  Train steps (x100)  Sensitivity  Specificity  IoU
megane          256x256     19,601            46,950            3,920               0.9242       0.9578       0.8726
favorite        256x256     16,921            29,960            1,050               0.8545       0.7926       0.7377
illust          256x256     24,421            38,587            1,825               0.9601       0.9707       0.9434
photo           256x256     16,296            30,606            1,725               0.9651       0.9339       0.9141
comic           256x256     18,646            4,067             900                 0.9950       0.9727       0.9068
face            256x256     19,573            25,033            2,000               0.9629       0.9168       0.9413
nsfw            256x256     18,002            41,258            3,730               0.8399       0.8748       0.7179
nsfw(mobile)    128x128     18,002            41,258            2,230               0.8313       0.8139       0.6637
kemono          256x256     5,863             21,174            1,225               0.8460       0.8383       0.5908
sailor_uniform  256x256     3,060             10,242            1,980               0.8501       0.8954       0.6904
blazer_uniform  256x256     958               8,681             550                 0.7126       0.8555       0.3229
cat             256x256     5,167             21,417            725                 0.8992       0.9325       0.7472
food            256x256     5,873             20,165            995                 0.9198       0.9487       0.8173
food(mobile)    128x128     5,873             20,165            1,570               0.9355       0.9366       0.8099
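For reference, the three quality columns can be derived from binary confusion counts as below (a sketch; the deck does not show its evaluation code, and this assumes IoU here is the Jaccard index over positive predictions):

def binary_metrics(tp, fp, tn, fn):
    # Sensitivity: fraction of positive samples correctly detected.
    sensitivity = tp / (tp + fn)
    # Specificity: fraction of negative samples correctly rejected.
    specificity = tn / (tn + fp)
    # IoU (Jaccard): overlap between predicted and actual positives.
    iou = tp / (tp + fp + fn)
    return sensitivity, specificity, iou

print(binary_metrics(tp=90, fp=10, tn=80, fn=5))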

Slide 38

https://twitter.com/_meganeco

Slide 39


Slide 40


Slide 41

https://twitter.com/keiji_ariyama/status/…

Slide 42

Data Augmentation:

def distort(image, model_input_size, channels,
            use_random_crop, use_random_colorized):
    with tf.name_scope('distort'):
        if use_random_crop:
            with tf.name_scope('random_crop'):
                image_size_ratio = tf.random.uniform([], 100, 120, tf.int32)
                image_size = tf.cast(
                    model_input_size * image_size_ratio / 100, tf.int32)
                image = tf.image.resize(image, (image_size, image_size))
                image = tf.image.random_crop(
                    image, (model_input_size, model_input_size, channels))
        else:
            image = tf.image.resize(
                image, (model_input_size, model_input_size))

Slide 43

Dataset generation (256x256):

python ./create_dataset.py \
    --base_dir /dataset/source/ \
    --output_tfrecords_dir $TFRECORDS_DIR \
    --output_catalogs_dir $CATALOGS_DIR \
    --image_size 256

TFRecord contents:
    labels: 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, ...
    masks:  1, 0, 1, 0, 0, 1, 0, ...
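A sketch of how one record with such labels and masks vectors could be serialized as a tf.train.Example (the feature names and the helper are assumptions for illustration; the actual create_dataset.py is not shown):

import tensorflow as tf

def make_example(image_bytes, labels, masks):
    # labels: per-tag float targets; masks: 1 where the tag is annotated.
    feature = {
        'image': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[image_bytes])),
        'labels': tf.train.Feature(
            float_list=tf.train.FloatList(value=labels)),
        'masks': tf.train.Feature(
            int64_list=tf.train.Int64List(value=masks)),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))

example = make_example(b'\x00' * 10, [1.0, 0.0, 1.0], [1, 0, 1])
with tf.io.TFRecordWriter('sample.tfrecord') as writer:
    writer.write(example.SerializeToString())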

Slide 44

Dataset generation: Resampling

Slide 45

Dataset generation (512x512):

python ./create_dataset.py \
    --base_dir /dataset/source/ \
    --output_tfrecords_dir $TFRECORDS_DIR \
    --output_catalogs_dir $CATALOGS_DIR \
    --image_size 512

TFRecord contents:
    labels: 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, ...
    masks:  1, 0, 1, 0, 0, 1, 0, ...

Slide 46

Data Augmentation:

def distort(image, request_image_size, channels,
            use_random_crop, use_random_colorized):
    with tf.name_scope('distort'):
        if use_random_crop:
            with tf.name_scope('random_crop'):
                image_size_ratio = tf.random.uniform([], 100, 120, tf.int32)
                image_size = tf.cast(
                    request_image_size * image_size_ratio / 100, tf.int32)
                image = tf.image.resize(image, (image_size, image_size))
                image = tf.image.random_crop(
                    image, (request_image_size, request_image_size, channels))
        else:
            image = tf.image.resize(
                image, (request_image_size, request_image_size))

Slide 47

https://twitter.com/keiji_ariyama/status/… ("the classification accuracy of megane …")

Slide 48

Future work:
• Full migration to TensorFlow 2.0
• Adopt keras-tuner for hyperparameter search (a sketch follows below)
• Adopt Grad-CAM (Gradient-weighted Class Activation Mapping)
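For the keras-tuner item above, a minimal hyperparameter-search sketch (the search space and model are hypothetical examples, not the project's code):

import tensorflow as tf
import kerastuner as kt

def build_model(hp):
    # Search over the dense-layer width and the learning rate.
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(hp.Int('units', 64, 256, step=64),
                              activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
    model.compile(
        optimizer=tf.keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    return model

tuner = kt.RandomSearch(build_model, objective='val_accuracy', max_trials=10)
# tuner.search(train_dataset, validation_data=test_dataset, epochs=5)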

Slide 49

http://meganekkokyodan.org/council… (Sold out!)

Slide 50

C-LIS CO., LTD.
This material is a copyrighted work of C-LIS Co., Ltd. (有限会社シーリス). Reproduction of all or part of this material without written permission from the copyright holder is prohibited.
The Android Studio icon is reproduced or modified from work created and shared by Google and used according to terms described in the Creative Commons Attribution License.
The Android robot is reproduced or modified from work created and shared by Google and used according to terms described in the Creative Commons Attribution License.
Product names, brand names, and company names are generally trademarks or registered trademarks of their respective companies; the ©, ™, and ® marks are omitted in this material.