Commit 4b5f12ba authored by Reynold Xin

[SPARK-7979] Enforce structural type checker.

Author: Reynold Xin <rxin@databricks.com>

Closes #6536 from rxin/structural-type-checker and squashes the following commits:

f833151 [Reynold Xin] Fixed compilation.
633f9a1 [Reynold Xin] Fixed typo.
d1fa804 [Reynold Xin] [SPARK-7979] Enforce structural type checker.
parent 63a50be1
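The commit turns on scalastyle's StructuralTypeChecker (see the scalastyle-config.xml hunk at the end of the diff) and whitelists the few remaining legitimate uses with scalastyle:off structural.type / scalastyle:on structural.type comments, as the hunks below show. A minimal sketch of what the rule flags and how the escape hatch looks; the object and method names are made up for illustration and are not from this commit:

```scala
// Sketch only: StructuralTypeCheckExample and closeQuietly are hypothetical names.
import scala.language.reflectiveCalls

object StructuralTypeCheckExample {
  // An explicit structural type annotation like the parameter below is what the
  // new check rejects; wrapping the declaration in off/on comments suppresses it.
  // scalastyle:off structural.type
  def closeQuietly(resource: { def close(): Unit }): Unit = {
    try resource.close() catch { case _: Exception => () } // reflective call at runtime
  }
  // scalastyle:on structural.type

  def main(args: Array[String]): Unit = {
    // Anything with a matching close() method conforms to the structural type.
    closeQuietly(new java.io.ByteArrayInputStream(Array[Byte]()))
  }
}
```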
@@ -28,7 +28,7 @@ import scala.language.reflectiveCalls
class XORShiftRandomSuite extends SparkFunSuite with Matchers {
-def fixture: Object {val seed: Long; val hundMil: Int; val xorRand: XORShiftRandom} = new {
+private def fixture = new {
val seed = 1L
val xorRand = new XORShiftRandom(seed)
val hundMil = 1e8.toInt
@@ -22,7 +22,6 @@ import scala.language.reflectiveCalls
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.regression.LabeledPoint
@@ -354,7 +353,11 @@ object DecisionTreeRunner {
/**
* Calculates the mean squared error for regression.
+ *
+ * This is just for demo purpose. In general, don't copy this code because it is NOT efficient
+ * due to the use of structural types, which leads to one reflection call per record.
*/
+// scalastyle:off structural.type
private[mllib] def meanSquaredError(
model: { def predict(features: Vector): Double },
data: RDD[LabeledPoint]): Double = {
@@ -363,4 +366,5 @@ object DecisionTreeRunner {
err * err
}.mean()
}
+// scalastyle:on structural.type
}
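The comment added above is the motivation for the whole commit: a method call through a structural type is dispatched via Java reflection, so meanSquaredError pays one reflective invocation per record. Below is a self-contained sketch of that cost next to a reflection-free alternative; Vec, ConstModel, and both mse helpers are illustrative names under assumed types, not Spark code:

```scala
import scala.language.reflectiveCalls

object StructuralTypeCost {
  final case class Vec(values: Array[Double])

  // Structural-type version, mirroring meanSquaredError above:
  // model.predict is resolved reflectively on every record.
  def mseStructural(
      model: { def predict(features: Vec): Double },
      data: Seq[(Vec, Double)]): Double = {
    data.map { case (features, label) =>
      val err = model.predict(features) - label // reflective call per record
      err * err
    }.sum / data.size
  }

  // Reflection-free alternative: accept the prediction function directly.
  def mseFunction(predict: Vec => Double, data: Seq[(Vec, Double)]): Double = {
    data.map { case (features, label) =>
      val err = predict(features) - label // ordinary call, no reflection
      err * err
    }.sum / data.size
  }

  def main(args: Array[String]): Unit = {
    object ConstModel { def predict(features: Vec): Double = 2.0 }
    val data = Seq((Vec(Array(1.0)), 3.0), (Vec(Array(2.0)), 1.0))
    println(mseStructural(ConstModel, data)) // 1.0
    println(mseFunction(ConstModel.predict, data)) // 1.0
  }
}
```

Passing a plain function (or a shared trait) keeps the call an ordinary virtual dispatch, which is why the comment warns against copying the structural-type signature.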
@@ -41,14 +41,16 @@ abstract class EdgeRDD[ED](
@transient sc: SparkContext,
@transient deps: Seq[Dependency[_]]) extends RDD[Edge[ED]](sc, deps) {
+// scalastyle:off structural.type
private[graphx] def partitionsRDD: RDD[(PartitionID, EdgePartition[ED, VD])] forSome { type VD }
+// scalastyle:on structural.type
override protected def getPartitions: Array[Partition] = partitionsRDD.partitions
override def compute(part: Partition, context: TaskContext): Iterator[Edge[ED]] = {
val p = firstParent[(PartitionID, EdgePartition[ED, _])].iterator(part, context)
if (p.hasNext) {
-p.next._2.iterator.map(_.copy())
+p.next()._2.iterator.map(_.copy())
} else {
Iterator.empty
}
@@ -37,11 +37,13 @@ import org.apache.spark.storage.StorageLevel
*/
private[ml] trait OneVsRestParams extends PredictorParams {
+// scalastyle:off structural.type
type ClassifierType = Classifier[F, E, M] forSome {
type F
type M <: ClassificationModel[F, M]
type E <: Classifier[F, E, M]
}
+// scalastyle:on structural.type
/**
* param for the base binary classifier that we reduce multiclass classification into.
@@ -114,6 +114,9 @@
<!-- <parameter name="maximum"><![CDATA[10]]></parameter> -->
<!-- </parameters> -->
<!-- </check> -->
+<check level="error" class="org.scalastyle.scalariform.StructuralTypeChecker" enabled="true"></check>
+<check level="error" class="org.scalastyle.scalariform.UppercaseLChecker" enabled="true"></check>
+<check level="error" class="org.scalastyle.scalariform.SimplifyBooleanExpressionChecker" enabled="false"></check>
<check level="error" class="org.scalastyle.scalariform.IfBraceChecker" enabled="true">