From 3b392c67dbeb7b2267015ffbeb2aac70dfc01870 Mon Sep 17 00:00:00 2001
From: Imran Rashid <imran@quantifind.com>
Date: Mon, 16 Jul 2012 18:25:15 -0700
Subject: [PATCH] fix up scaladoc, naming of type parameters

---
 core/src/main/scala/spark/Accumulators.scala | 24 ++++++++++----------
 core/src/main/scala/spark/SparkContext.scala |  3 ---
 2 files changed, 12 insertions(+), 15 deletions(-)

diff --git a/core/src/main/scala/spark/Accumulators.scala b/core/src/main/scala/spark/Accumulators.scala
index 5a1ca49626..16e3657898 100644
--- a/core/src/main/scala/spark/Accumulators.scala
+++ b/core/src/main/scala/spark/Accumulators.scala
@@ -19,7 +19,7 @@ class Accumulable[T,R] (
 
   /**
    * add more data to this accumulator / accumulable
-   * @param term
+   * @param term the data to add
    */
   def += (term: R) { value_ = param.addAccumulator(value_, term) }
 
@@ -27,7 +27,7 @@ class Accumulable[T,R] (
    * merge two accumulable objects together
    * <p>
    * Normally, a user will not want to use this version, but will instead call `+=`.
-   * @param term
+   * @param term the other Accumulable that will get merged with this
    */
   def ++= (term: T) { value_ = param.addInPlace(value_, term)}
   def value = this.value_
@@ -64,33 +64,33 @@ trait AccumulatorParam[T] extends AccumulableParam[T,T] {
 
 /**
  * A datatype that can be accumulated, ie. has a commutative & associative +.
- * <p>
+ *
  * You must define how to add data, and how to merge two of these together. For some datatypes, these might be
  * the same operation (eg., a counter). In that case, you might want to use [[spark.AccumulatorParam]]. They won't
  * always be the same, though -- eg., imagine you are accumulating a set. You will add items to the set, and you
  * will union two sets together.
  *
- * @tparam T the full accumulated data
- * @tparam R partial data that can be added in
+ * @tparam R the full accumulated data
+ * @tparam T partial data that can be added in
  */
-trait AccumulableParam[T,R] extends Serializable {
+trait AccumulableParam[R,T] extends Serializable {
   /**
    * Add additional data to the accumulator value.
    * @param t1 the current value of the accumulator
    * @param t2 the data to be added to the accumulator
    * @return the new value of the accumulator
    */
-  def addAccumulator(t1: T, t2: R) : T
+  def addAccumulator(t1: R, t2: T) : R
 
   /**
    * merge two accumulated values together
-   * @param t1
-   * @param t2
-   * @return
+   * @param t1 one set of accumulated data
+   * @param t2 another set of accumulated data
+   * @return both data sets merged together
    */
-  def addInPlace(t1: T, t2: T): T
+  def addInPlace(t1: R, t2: R): R
 
-  def zero(initialValue: T): T
+  def zero(initialValue: R): R
 }
 
 // TODO: The multi-thread support in accumulators is kind of lame; check
diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala
index ea85324c35..32f37822a5 100644
--- a/core/src/main/scala/spark/SparkContext.scala
+++ b/core/src/main/scala/spark/SparkContext.scala
@@ -287,11 +287,8 @@ class SparkContext(
 
   /**
    * create an accumulatable shared variable, with a `+=` method
-   * @param initialValue
-   * @param param
    * @tparam T accumulator type
    * @tparam R type that can be added to the accumulator
-   * @return
    */
   def accumulable[T,R](initialValue: T)(implicit param: AccumulableParam[T,R]) =
     new Accumulable(initialValue, param)
-- 
GitLab
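
For context, a minimal sketch of how the renamed trait reads in use, following the set-accumulation scenario the scaladoc above describes. The signatures come from this patch; the SetParam object and the driver-side usage are illustrative, not part of the commit:

import spark.{AccumulableParam, SparkContext}

// Illustrative param for accumulating a Set[Int]. Per this patch,
// R is the full accumulated data (Set[Int]) and T the partial data
// added in (Int) -- adding an element and merging two sets are
// genuinely different operations here.
object SetParam extends AccumulableParam[Set[Int], Int] {
  // add a single element to the accumulated set
  def addAccumulator(t1: Set[Int], t2: Int): Set[Int] = t1 + t2
  // merge two accumulated sets (e.g. partial results from different tasks)
  def addInPlace(t1: Set[Int], t2: Set[Int]): Set[Int] = t1 ++ t2
  // the zero value shipped to workers
  def zero(initialValue: Set[Int]): Set[Int] = Set.empty[Int]
}

// Driver side, given an existing SparkContext `sc` and RDD `rdd`:
//   val seen = sc.accumulable(Set.empty[Int])(SetParam)
//   rdd.foreach(x => seen += x)  // += calls addAccumulator per element

Note that SparkContext.accumulable keeps its [T,R] order (T the accumulator type, R the addable type), so AccumulableParam[Set[Int], Int] satisfies both views: the first slot is always the full accumulated data.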