Commit 92675471 authored by Zheng RuiFeng, committed by Sean Owen

[MINOR][DOC] Fix doc style in ml.ann.Layer and MultilayerPerceptronClassifier

## What changes were proposed in this pull request?
1. Fix the indentation
2. Add a missing param description

## How was this patch tested?
unit tests

Author: Zheng RuiFeng <ruifengz@foxmail.com>

Closes #12499 from zhengruifeng/fix_doc.
parent bf95b8da
@@ -88,7 +88,7 @@ private[ann] trait LayerModel extends Serializable {
   * LayerModel implementation and the stack (batch) size
   * Developer is responsible for checking the size of output
   * when writing to it
-   *
+   *
   * @param data data
   * @param output output (modified in place)
   */
@@ -100,8 +100,8 @@ private[ann] trait LayerModel extends Serializable {
   * LayerModel implementation and the stack (batch) size
   * Developer is responsible for checking the size of
   * prevDelta when writing to it
-   *
-   * @param delta delta of this layer
+   *
+   * @param delta delta of this layer
   * @param output output of this layer
   * @param prevDelta the previous delta (modified in place)
   */
@@ -185,7 +185,7 @@ private[ann] object AffineLayerModel {
  /**
   * Creates a model of Affine layer
-   *
+   *
   * @param layer layer properties
   * @param weights vector for weights initialization
   * @param random random number generator
@@ -202,8 +202,8 @@ private[ann] object AffineLayerModel {
   * where a is chosen in a such way that the weight variance corresponds
   * to the points to the maximal curvature of the activation function
   * (which is approximately 2.38 for a standard sigmoid)
-   *
-   * @param numIn number of inputs
+   *
+   * @param numIn number of inputs
   * @param numOut number of outputs
   * @param weights vector for weights initialization
   * @param random random number generator
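For reference, a minimal sketch of this kind of initialization, assuming the bound a = 2.4 / numIn (an approximation of the ~2.38 curvature point quoted above; the exact constant and layout in Spark's implementation may differ):

import java.util.Random

// Fill a flat weight array for an affine layer with values drawn uniformly
// from (-a, a); the extra numOut entries are the per-output bias terms.
def initWeights(numIn: Int, numOut: Int, random: Random): Array[Double] = {
  val a = 2.4 / numIn                      // assumed bound, cf. ~2.38 above
  Array.fill(numOut * numIn + numOut) {
    (2.0 * random.nextDouble() - 1.0) * a  // uniform in (-a, a)
  }
}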
@@ -354,7 +354,7 @@ private[ann] trait TopologyModel extends Serializable {
  val layerModels: Array[LayerModel]
  /**
   * Forward propagation
-   *
+   *
   * @param data input data
   * @return array of outputs for each of the layers
   */
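As a conceptual sketch of what "array of outputs for each of the layers" means (illustrative only, with layers modeled as plain functions rather than LayerModel instances):

// scanLeft threads the input through each layer and keeps every
// intermediate activation, which backpropagation later needs.
def forward(data: Array[Double],
            layers: Seq[Array[Double] => Array[Double]]): Array[Array[Double]] =
  layers.scanLeft(data)((in, layer) => layer(in)).toArray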
@@ -362,7 +362,7 @@ private[ann] trait TopologyModel extends Serializable {
  /**
   * Prediction of the model
-   *
+   *
   * @param data input data
   * @return prediction
   */
@@ -370,7 +370,7 @@ private[ann] trait TopologyModel extends Serializable {
  /**
   * Computes gradient for the network
-   *
+   *
   * @param data input data
   * @param target target output
   * @param cumGradient cumulative gradient
@@ -384,7 +384,7 @@ private[ann] trait TopologyModel extends Serializable {
  /**
   * Feed forward ANN
   *
-   * @param layers
+   * @param layers Array of layers
   */
private[ann] class FeedForwardTopology private(val layers: Array[Layer]) extends Topology {
  override def model(weights: Vector): TopologyModel = FeedForwardModel(this, weights)
@@ -398,7 +398,7 @@ private[ann] class FeedForwardTopology private(val layers: Array[Layer]) extends
private[ml] object FeedForwardTopology {
  /**
   * Creates a feed forward topology from the array of layers
-   *
+   *
   * @param layers array of layers
   * @return feed forward topology
   */
@@ -408,7 +408,7 @@ private[ml] object FeedForwardTopology {
  /**
   * Creates a multi-layer perceptron
-   *
+   *
   * @param layerSizes sizes of layers including input and output size
   * @param softmaxOnTop whether to use SoftMax or Sigmoid function for an output layer.
   *                     Softmax is default
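A hedged usage sketch of this factory (it is private[ml], so only code inside org.apache.spark.ml can call it; the sizes here are arbitrary):

// Build a 4-5-3 perceptron; the last entry of layerSizes is the output size.
val topology = FeedForwardTopology.multiLayerPerceptron(
  layerSizes = Array(4, 5, 3),
  softmaxOnTop = true)  // softmax output layer (the default) rather than sigmoid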
@@ -534,7 +534,7 @@ private[ann] object FeedForwardModel {
  /**
   * Creates a model from a topology and weights
-   *
+   *
   * @param topology topology
   * @param weights weights
   * @return model
@@ -546,7 +546,7 @@ private[ann] object FeedForwardModel {
  /**
   * Creates a model given a topology and seed
-   *
+   *
   * @param topology topology
   * @param seed seed for generating the weights
   * @return model
@@ -610,7 +610,7 @@ private[ann] class DataStacker(stackSize: Int, inputSize: Int, outputSize: Int)
  /**
   * Stacks the data
-   *
+   *
   * @param data RDD of vector pairs
   * @return RDD of double (always zero) and vector that contains the stacked vectors
   */
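Illustratively (this is not Spark's exact implementation), stacking amounts to flattening a batch of (input, output) pairs into one long vector that batch code can later view as an inputSize x stackSize and an outputSize x stackSize matrix, with the Double being the always-zero dummy label mentioned above:

def stackBatch(batch: Seq[(Array[Double], Array[Double])]): (Double, Array[Double]) = {
  val inputs  = batch.iterator.flatMap(_._1).toArray   // input block
  val outputs = batch.iterator.flatMap(_._2).toArray   // output block
  (0.0, inputs ++ outputs)                             // dummy label + stacked data
}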
@@ -643,7 +643,7 @@ private[ann] class DataStacker(stackSize: Int, inputSize: Int, outputSize: Int)
  /**
   * Unstack the stacked vectors into matrices for batch operations
-   *
+   *
   * @param data stacked vector
   * @return pair of matrices holding input and output data and the real stack size
   */
@@ -714,7 +714,7 @@ private[ml] class FeedForwardTrainer(
  /**
   * Sets weights
-   *
+   *
   * @param value weights
   * @return trainer
   */
@@ -725,7 +725,7 @@ private[ml] class FeedForwardTrainer(
  /**
   * Sets the stack size
-   *
+   *
   * @param value stack size
   * @return trainer
   */
@@ -737,7 +737,7 @@ private[ml] class FeedForwardTrainer(
  /**
   * Sets the SGD optimizer
-   *
+   *
   * @return SGD optimizer
   */
  def SGDOptimizer: GradientDescent = {
@@ -748,7 +748,7 @@ private[ml] class FeedForwardTrainer(
  /**
   * Sets the LBFGS optimizer
-   *
+   *
   * @return LBFGS optimizer
   */
  def LBFGSOptimizer: LBFGS = {
@@ -759,7 +759,7 @@ private[ml] class FeedForwardTrainer(
  /**
   * Sets the updater
-   *
+   *
   * @param value updater
   * @return trainer
   */
@@ -771,7 +771,7 @@ private[ml] class FeedForwardTrainer(
  /**
   * Sets the gradient
-   *
+   *
   * @param value gradient
   * @return trainer
   */
@@ -801,7 +801,7 @@ private[ml] class FeedForwardTrainer(
  /**
   * Trains the ANN
-   *
+   *
   * @param data RDD of input and output vector pairs
   * @return model
   */
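A sketch of how the trainer might be driven end to end, assuming the constructor and RDD types implied by the hunks above (a private[ml] API, so the names here are illustrative):

// Configure LBFGS and train on an RDD of (input, output) vector pairs.
val trainer = new FeedForwardTrainer(topology, inputSize = 4, outputSize = 3)
trainer.setStackSize(128)
trainer.LBFGSOptimizer
  .setConvergenceTol(1e-4)
  .setNumIterations(100)
val model = trainer.train(data)  // data: RDD[(Vector, Vector)]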
@@ -37,8 +37,8 @@ private[ml] trait MultilayerPerceptronParams extends PredictorParams
  /**
   * Layer sizes including input size and output size.
   * Default: Array(1, 1)
-   *
-   * @group param
+   *
+   * @group param
   */
  final val layers: IntArrayParam = new IntArrayParam(this, "layers",
    "Sizes of layers from input layer to output layer" +
@@ -56,8 +56,8 @@ private[ml] trait MultilayerPerceptronParams extends PredictorParams
   * a partition then it is adjusted to the size of this data.
   * Recommended size is between 10 and 1000.
   * Default: 128
-   *
-   * @group expertParam
+   *
+   * @group expertParam
   */
  final val blockSize: IntParam = new IntParam(this, "blockSize",
    "Block size for stacking input data in matrices. Data is stacked within partitions." +
@@ -71,7 +71,7 @@ private[ml] trait MultilayerPerceptronParams extends PredictorParams
  /**
   * Allows setting the solver: minibatch gradient descent (gd) or l-bfgs.
   * l-bfgs is the default one.
-   *
+   *
   * @group expertParam
   */
  final val solver: Param[String] = new Param[String](this, "solver",
@@ -84,8 +84,8 @@ private[ml] trait MultilayerPerceptronParams extends PredictorParams
  /**
   * Model weights. Can be returned either after training or after explicit setting
-   *
-   * @group expertParam
+   *
+   * @group expertParam
   */
  final val weights: Param[Vector] = new Param[Vector](this, "weights",
    " Sets the weights of the model ")
@@ -156,8 +156,8 @@ class MultilayerPerceptronClassifier @Since("1.5.0") (
  /**
   * Set the maximum number of iterations.
   * Default is 100.
-   *
-   * @group setParam
+   *
+   * @group setParam
   */
  @Since("1.5.0")
  def setMaxIter(value: Int): this.type = set(maxIter, value)
@@ -166,24 +166,24 @@ class MultilayerPerceptronClassifier @Since("1.5.0") (
   * Set the convergence tolerance of iterations.
   * Smaller value will lead to higher accuracy with the cost of more iterations.
   * Default is 1E-4.
-   *
-   * @group setParam
+   *
+   * @group setParam
   */
  @Since("1.5.0")
  def setTol(value: Double): this.type = set(tol, value)

  /**
   * Set the seed for weights initialization if weights are not set
-   *
-   * @group setParam
+   *
+   * @group setParam
   */
  @Since("1.5.0")
  def setSeed(value: Long): this.type = set(seed, value)

  /**
   * Sets the model weights.
-   *
-   * @group expertParam
+   *
+   * @group expertParam
   */
  @Since("2.0.0")
  def setWeights(value: Vector): this.type = set(weights, value)
@@ -232,8 +232,8 @@ object MultilayerPerceptronClassifier
 * :: Experimental ::
 * Classification model based on the Multilayer Perceptron.
 * Each layer has sigmoid activation function, output layer has softmax.
-   *
-   * @param uid uid
+ *
+ * @param uid uid
 * @param layers array of layer sizes including input and output layers
 * @param weights vector of initial weights for the model that consists of the weights of layers
 * @return prediction model
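For contrast with these internals, the public estimator that fronts them can be used as follows (standard Spark ML usage; the train DataFrame with "features"/"label" columns is an assumption):

import org.apache.spark.ml.classification.MultilayerPerceptronClassifier

// Layer sizes include the input and output layers, per the layers param above.
val mlp = new MultilayerPerceptronClassifier()
  .setLayers(Array(4, 5, 4, 3))  // 4 features, two hidden layers, 3 classes
  .setBlockSize(128)             // stacking size, per the blockSize param
  .setSeed(1234L)                // seed for weight initialization
  .setMaxIter(100)
  .setTol(1e-4)
val model = mlp.fit(train)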