Commit d8158ced authored by Matei Zaharia

Merge branch 'master' of github.com:mesos/spark

parents 497f5575 f11ad72d
@@ -48,8 +48,7 @@ def update(i, vec, mat, ratings):
 if __name__ == "__main__":
     if len(sys.argv) < 2:
-        print >> sys.stderr, \
-            "Usage: PythonALS <master> <M> <U> <F> <iters> <slices>"
+        print >> sys.stderr, "Usage: als <master> <M> <U> <F> <iters> <slices>"
         exit(-1)
     sc = SparkContext(sys.argv[1], "PythonALS", pyFiles=[realpath(__file__)])
     M = int(sys.argv[2]) if len(sys.argv) > 2 else 100
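Every file in this commit shares the entry-point shape this hunk touches: validate sys.argv, print a usage line, then build a SparkContext with the old positional (master, jobName) constructor. In als.py (the filename is inferred from the new usage string) the constructor also gets pyFiles=[realpath(__file__)], which ships the script itself to the workers so top-level functions like update() can be resolved when tasks are unpickled. A minimal sketch of that shared skeleton, assuming the PySpark API of this era:

    import sys
    from os.path import realpath
    from pyspark import SparkContext

    if __name__ == "__main__":
        if len(sys.argv) < 2:
            print >> sys.stderr, "Usage: example <master> [<slices>]"
            exit(-1)
        # Ship this script to the workers so functions defined at top
        # level can be imported when tasks are unpickled there.
        sc = SparkContext(sys.argv[1], "Example", pyFiles=[realpath(__file__)])
        # Optional positional arguments fall back to defaults.
        slices = int(sys.argv[2]) if len(sys.argv) > 2 else 2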
@@ -84,5 +83,5 @@ if __name__ == "__main__":
         usb = sc.broadcast(us)
         error = rmse(R, ms, us)
-            print "Iteration %d:" % i
+        print "Iteration %d:" % i
         print "\nRMSE: %5.4f\n" % error
@@ -41,8 +41,7 @@ def closestPoint(p, centers):
 if __name__ == "__main__":
     if len(sys.argv) < 5:
-        print >> sys.stderr, \
-            "Usage: PythonKMeans <master> <file> <k> <convergeDist>"
+        print >> sys.stderr, "Usage: kmeans <master> <file> <k> <convergeDist>"
         exit(-1)
     sc = SparkContext(sys.argv[1], "PythonKMeans")
     lines = sc.textFile(sys.argv[2])
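The kmeans.py hunk header points at closestPoint(p, centers), the helper the job applies to every input point. A sketch of what such a helper typically does, assuming points are NumPy arrays; squared Euclidean distance is enough because only the argmin matters:

    import numpy as np

    def closestPoint(p, centers):
        # Return the index of the center nearest to p.
        bestIndex = 0
        closest = float("+inf")
        for i in range(len(centers)):
            dist = np.sum((p - centers[i]) ** 2)
            if dist < closest:
                closest = dist
                bestIndex = i
        return bestIndex

    # e.g. closestPoint(np.array([0.9, 1.1]),
    #                   [np.array([0., 0.]), np.array([1., 1.])]) returns 1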
@@ -35,7 +35,7 @@ np.random.seed(42)
 DataPoint = namedtuple("DataPoint", ['x', 'y'])
-from lr import DataPoint # So that DataPoint is properly serialized
+from logistic_regression import DataPoint # So that DataPoint is properly serialized

 def generateData():
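This one-line change is forced by the rename of lr.py to logistic_regression.py. The self-import looks odd but is a serialization trick: pickle records a class by its defining module, and a namedtuple living only in the driver's __main__ cannot be resolved on workers, so the example re-imports DataPoint under the file's own module name. The pattern, with the reasoning spelled out in comments:

    from collections import namedtuple

    DataPoint = namedtuple("DataPoint", ['x', 'y'])
    # Re-import the class under this file's module name so pickled
    # instances reference logistic_regression.DataPoint, which workers
    # can import, instead of __main__.DataPoint, which they cannot.
    from logistic_regression import DataPoint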
@@ -48,8 +48,7 @@ def generateData():
 if __name__ == "__main__":
     if len(sys.argv) == 1:
-        print >> sys.stderr, \
-            "Usage: PythonLR <master> [<slices>]"
+        print >> sys.stderr, "Usage: logistic_regression <master> [<slices>]"
         exit(-1)
     sc = SparkContext(sys.argv[1], "PythonLR", pyFiles=[realpath(__file__)])
     slices = int(sys.argv[2]) if len(sys.argv) > 2 else 2
@@ -24,8 +24,7 @@ from pyspark import SparkContext
 if __name__ == "__main__":
     if len(sys.argv) == 1:
-        print >> sys.stderr, \
-            "Usage: PythonPi <master> [<slices>]"
+        print >> sys.stderr, "Usage: pi <master> [<slices>]"
         exit(-1)
     sc = SparkContext(sys.argv[1], "PythonPi")
     slices = int(sys.argv[2]) if len(sys.argv) > 2 else 2
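pi.py (again, filename inferred from the usage string) takes only an optional slice count because its input is synthetic: it estimates pi by Monte Carlo sampling. A self-contained, cluster-free sketch of the computation the example distributes across slices:

    import random

    n = 100000
    count = 0
    for _ in range(n):
        # Sample uniformly from the square [-1, 1] x [-1, 1];
        # the unit circle covers pi/4 of its area.
        x = random.random() * 2 - 1
        y = random.random() * 2 - 1
        if x * x + y * y < 1:
            count += 1
    print "Pi is roughly %f" % (4.0 * count / n)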
@@ -37,10 +37,9 @@ def generateGraph():
 if __name__ == "__main__":
     if len(sys.argv) == 1:
-        print >> sys.stderr, \
-            "Usage: PythonTC <master> [<slices>]"
+        print >> sys.stderr, "Usage: transitive_closure <master> [<slices>]"
         exit(-1)
-    sc = SparkContext(sys.argv[1], "PythonTC")
+    sc = SparkContext(sys.argv[1], "PythonTransitiveClosure")
     slices = int(sys.argv[2]) if len(sys.argv) > 2 else 2
     tc = sc.parallelize(generateGraph(), slices).cache()
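The last context line explains the .cache() call: transitive closure joins the edge set against a growing path set until a fixpoint, so the parallelized edges are reread on every iteration. A plain-Python, set-based sketch of that fixpoint (the sample edges are an assumption; the example itself expresses the same loop with RDD joins over the cached data):

    edges = set([(1, 2), (2, 3), (3, 4)])   # assumed sample graph
    tc = set(edges)
    while True:
        # Derive (x, z) whenever (x, y) and (y, z) are both present.
        derived = set((x, z) for (x, y) in tc for (y2, z) in tc if y == y2)
        new_tc = tc | derived
        if new_tc == tc:
            break                            # fixpoint reached
        tc = new_tc
    print "TC has %d edges" % len(tc)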
@@ -23,8 +23,7 @@ from pyspark import SparkContext
 if __name__ == "__main__":
     if len(sys.argv) < 3:
-        print >> sys.stderr, \
-            "Usage: PythonWordCount <master> <file>"
+        print >> sys.stderr, "Usage: wordcount <master> <file>"
         exit(-1)
     sc = SparkContext(sys.argv[1], "PythonWordCount")
     lines = sc.textFile(sys.argv[2], 1)
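The wordcount.py diff cuts off after the textFile() call, but the body that presumably follows is the classic flatMap / map / reduceByKey pipeline. A hedged sketch of that remainder, reusing the old positional SparkContext constructor shown above:

    import sys
    from operator import add
    from pyspark import SparkContext

    sc = SparkContext(sys.argv[1], "PythonWordCount")
    lines = sc.textFile(sys.argv[2], 1)
    counts = lines.flatMap(lambda line: line.split(' ')) \
                  .map(lambda word: (word, 1)) \
                  .reduceByKey(add)
    for (word, count) in counts.collect():
        print "%s: %i" % (word, count)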