< opt.Epoch; i++ { // one epoch of gradient descent over the whole parameter vector
	// Snapshot the current parameters so every component j is updated
	// from the same base values (synchronous update) — size by len, not
	// cap, so the snapshot has exactly the live elements of thetas.
	orgThetas := make([]float64, len(thetas))
	copy(orgThetas, thetas)

	// Re-shuffle the dataset each epoch before taking mini-batches.
	shuffled := dataset.Shuffle()

	for j := range thetas {
		// Partial derivative of the loss w.r.t. theta_j on this batch.
		// Named grad (not gradient) to avoid shadowing the gradient
		// function it is computed by.
		grad := gradient(shuffled, orgThetas, j, batchSize)
		// Standard gradient-descent step: theta_j -= eta * dL/dtheta_j.
		// NOTE(review): opt.LearingRate is misspelled ("Learning") in the
		// struct declared elsewhere — fix there, not here, to keep this
		// block compiling against the existing field name.
		thetas[j] = orgThetas[j] - opt.LearingRate*grad
	}
}