yujiosaka
April 05, 2016
13k

# ひたすら楽してディープラーニング

April 05, 2016

## Transcript

JavaScript

11. ### 第3章：ベイズ 第5章：kNN法 第6章：一般化線形モデル 第7章：ニューラルネットワーク 第8章：サポートベクターマシン 越えられない壁 ← 分かる ← 分かる

← 分かる ← お、おう… ← (ɾ㱼ɾ) ŜŠŘŘ!!

チュートリアル課題

21. ### 1.最近のブームに乗っかる 2.苦手な言語は使わない 3.高度なライブラリは使わない 4.プレゼン資料は頑張らない ひたすら楽してKaggle とりあえずディープラーニング 使ってみる JavaScriptしか使わない lodashと線形代数ライブラリ さえあればいい

エンジニアだしコードで勝負

24. ### ✓ 英語のオンラインブック（日本語化も進行中） ✓ ニューラルネットワークから ディープラーニングまでの流れを詳細に解説 ✓ Pythonのサンプル実装が読みやすい Neural Networks and

Deep Learning オススメ！

28. ### Python def update_mini_batch(self, mini_batch, eta): nabla_b = [np.zeros(b.shape) for b

in self.biases] nabla_w = [np.zeros(w.shape) for w in self.weights] for x, y in mini_batch: delta_nabla_b, delta_nabla_w = self.backprop(x, y) nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)] nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)] self.weights = [w-(eta/len(mini_batch))*nw for w, nw in zip(self.weights, nabla_w)] self.biases = [b-(eta/len(mini_batch))*nb for b, nb in zip(self.biases, nabla_b)]
29. ### CoffeeScript updateMiniBatch: (miniBatch, eta) -> nablaB = (Matrix.zeros(b.rows, b.cols) for

b in @biases) nablaW = (Matrix.zeros(w.rows, w.cols) for w in @weights) for [x, y] in miniBatch [deltaNablaB, deltaNablaW] = @backprop(x, y) nablaB = (nb.plus(dnb) for [nb, dnb] in _.zip(nablaB, deltaNablaB)) nablaW = (nw.plus(dnw) for [nw, dnw] in _.zip(nablaW, deltaNablaW)) @weights = (w.minus(nw.mulEach(eta / miniBatch.length))  for [w, nw] in _.zip(@weights, nablaW)) @biases = (b.minus(nb.mulEach(eta / miniBatch.length))  for [b, nb] in _.zip(@biases, nablaB))

31. ### numpy.nan_to_num nanToNum() { let thisData = this.data, rows = this.rows,

cols = this.cols; let row, col, result = new Array(rows); for (row=0; row<rows; ++row) { result[row] = new Array(cols); for (col=0; col<cols; ++col) { result[row][col] = n2n(thisData[row][col]); } } return new Matrix(result); };
32. ### numpy.ravel ravel() { let thisData = this.data, rows = this.rows,

cols = this.cols; let a = new Array(rows * cols); for (let i = 0, jBase = 0; i<rows; ++i, jBase += cols) { for (let j = 0; j<cols; ++j) { a[jBase + j] = thisData[i][j]; } } return a; };

38. ### b パーセプトロンモデル x1 x2 x3 output w1 w2 w3 output

JGЄKXKYKC≤  JGЄKXKYKC
39. ### 5 パーセプトロンモデル 天気は良いか？ 彼女は行きたいか？ 会場は駅の近くか？ お祭りに行く？ 6 2 2 No

Yes Yes No   ≤
40. ### b シグモイドニューロンモデル x1 x2 x3 w1 w2 w3 output

 FYQ ЄKXKYKC output

その他の技術

58. ### ハマった① 数式が頭の中に入ってこない問題 普段使い慣れたプログラムにしてみれば 意外と「なんだそんなことか」で済むこともある function sigmoid(z) { return 1 /

(1 + Math.exp(-z)); } let output = sigmoid(w.dot(a).plus(b));

60. ### 教科書通りに実装するとソフトマックス関数が桁あふれする またもやstackoverflowから引っ張ってきたら動いた ハマった③ 教科書には書いていない問題 let max = _.max(vector), tmp =

_.map(vector, (v) => { return Math.exp(v - max); }), sum = _.sum(tmp); return _.map(tmp, (v) => { return v / sum; });