Commit 07da72b4 authored by Shivaram Venkataraman

Remove duplicate loss history and clarify why.

Also some minor style fixes.
parent fe7298b5
@@ -151,7 +151,6 @@ object LogisticRegressionLocalRandomSGD {
       input: RDD[(Int, Array[Double])],
       numIterations: Int,
       stepSize: Double,
       miniBatchFraction: Double,
       initialWeights: Array[Double])
     : LogisticRegressionModel =
@@ -174,7 +173,6 @@ object LogisticRegressionLocalRandomSGD {
       input: RDD[(Int, Array[Double])],
       numIterations: Int,
       stepSize: Double,
       miniBatchFraction: Double)
     : LogisticRegressionModel =
   {
@@ -195,8 +193,7 @@ object LogisticRegressionLocalRandomSGD {
   def train(
       input: RDD[(Int, Array[Double])],
       numIterations: Int,
-      stepSize: Double
-      )
+      stepSize: Double)
     : LogisticRegressionModel =
   {
     train(input, numIterations, stepSize, 1.0)
@@ -61,7 +61,7 @@ object GradientDescent {
     // Initialize weights as a column vector
     var weights = new DoubleMatrix(initialWeights.length, 1, initialWeights:_*)
-    var reg_val = 0.0
+    var regVal = 0.0
     for (i <- 1 to numIters) {
       val (gradientSum, lossSum) = data.sample(false, miniBatchFraction, 42+i).map {
@@ -71,15 +71,14 @@ object GradientDescent {
         (grad, loss)
       }.reduce((a, b) => (a._1.addi(b._1), a._2 + b._2))
-      stochasticLossHistory.append(lossSum / miniBatchSize + reg_val)
+      /**
+       * NOTE(Xinghao): lossSum is computed using the weights from the previous iteration
+       * and regVal is the regularization value computed in the previous iteration as well.
+       */
+      stochasticLossHistory.append(lossSum / miniBatchSize + regVal)
       val update = updater.compute(weights, gradientSum.div(miniBatchSize), stepSize, i, regParam)
       weights = update._1
-      reg_val = update._2
-      stochasticLossHistory.append(lossSum / miniBatchSize + reg_val)
-      /*
-       * NOTE(Xinghao): The loss here is sum of lossSum computed using the weights before applying updater,
-       * and reg_val using weights after applying updater
-       */
+      regVal = update._2
     }
     (weights.toArray, stochasticLossHistory.toArray)
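For intuition, here is a minimal, self-contained Scala sketch of the bookkeeping the new NOTE describes. It is not the Spark code: LossHistorySketch, the toy data, and the plain SGD step stand in for the RDD sampling and updater.compute call. The point it illustrates is that the loss recorded at iteration i is evaluated with the pre-update weights, and regVal still holds the regularization value produced by iteration i - 1, so the history is appended exactly once, before the update.

// Hypothetical sketch, not Spark's GradientDescent.
object LossHistorySketch {
  def main(args: Array[String]): Unit = {
    val data = Seq((1.0, 3.0), (2.0, 5.0), (3.0, 7.0))   // toy (x, y) pairs
    val numIters = 20
    val stepSize = 0.05
    val regParam = 0.1

    var weight = 0.0                                      // single scalar weight for simplicity
    var regVal = 0.0                                      // regularization value from the previous iteration
    val stochasticLossHistory = scala.collection.mutable.ArrayBuffer[Double]()

    for (i <- 1 to numIters) {
      // Gradient and loss of the squared error, both computed with the current (pre-update) weight.
      val (gradSum, lossSum) = data.map { case (x, y) =>
        val err = weight * x - y
        (2.0 * err * x, err * err)
      }.reduce((a, b) => (a._1 + b._1, a._2 + b._2))

      // Append exactly once: pre-update loss plus last iteration's regularization value.
      stochasticLossHistory.append(lossSum / data.size + regVal)

      // Plain SGD step with an L2 penalty, standing in for updater.compute.
      weight -= stepSize / math.sqrt(i) * (gradSum / data.size + 2.0 * regParam * weight)
      regVal = regParam * weight * weight
    }
    println(s"final weight = $weight")
    println(s"loss history = ${stochasticLossHistory.mkString(", ")}")
  }
}

Appending again after the update, as the removed lines did, would record each iteration twice and mix a pre-update loss with a post-update penalty, which is why the duplicate append is dropped.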
@@ -76,7 +76,7 @@ class SquaredL2Updater extends Updater {
     val thisIterStepSize = stepSize / math.sqrt(iter)
     val normGradient = gradient.mul(thisIterStepSize)
     val newWeights = weightsOld.sub(normGradient).div(2.0 * thisIterStepSize * regParam + 1.0)
-    (newWeights, pow(newWeights.norm2,2.0) * regParam)
+    (newWeights, pow(newWeights.norm2, 2.0) * regParam)
   }
 }
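As an aside, one way to read the divisor in SquaredL2Updater.compute is as the closed-form solution of a proximal step for the squared-L2 penalty; the following is only a sketch of that reading, writing eta for thisIterStepSize, lambda for regParam, and g for the gradient:

w_{\text{new}} = \arg\min_{w}\Bigl[\lambda\,\lVert w\rVert^{2} + \tfrac{1}{2\eta}\,\lVert w - (w_{\text{old}} - \eta g)\rVert^{2}\Bigr]
\;\Rightarrow\; 2\lambda w + \tfrac{1}{\eta}\bigl(w - (w_{\text{old}} - \eta g)\bigr) = 0
\;\Rightarrow\; w_{\text{new}} = \frac{w_{\text{old}} - \eta g}{2\eta\lambda + 1}

This matches weightsOld.sub(normGradient).div(2.0 * thisIterStepSize * regParam + 1.0), and the second element of the returned pair, regParam * pow(newWeights.norm2, 2.0), is the regVal that GradientDescent folds into the loss history in the hunk above.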