cs525-sp18-g07 / spark

Commit bf7033f3 (parent 8c8947e2), authored 11 years ago by Ginger Smith:

    fixing formatting, style, and input
Changes: 1 changed file, 37 additions and 36 deletions

mllib/src/main/scala/spark/mllib/util/MFDataGenerator.scala  (+37 −36)
@@ -28,32 +28,32 @@ import spark.mllib.util.MLUtils
  * Generate RDD(s) containing data for Matrix Factorization.
  *
  * This method samples training entries according to the oversampling factor
- * 'tr_samp_fact', which is a multiplicative factor of the number of
+ * 'trainSampFact', which is a multiplicative factor of the number of
  * degrees of freedom of the matrix: rank*(m+n-rank).
  *
  * It optionally samples entries for a testing matrix using
- * 'te_samp_fact', the percentage of the number of training entries
+ * 'testSampFact', the percentage of the number of training entries
  * to use for testing.
  *
  * This method takes the following inputs:
- *   sparkMaster    (String) The master URL.
- *   outputPath     (String) Directory to save output.
- *   m              (Int) Number of rows in data matrix.
- *   n              (Int) Number of columns in data matrix.
- *   rank           (Int) Underlying rank of data matrix.
- *   tr_samp_fact   (Double) Oversampling factor.
- *   noise          (Boolean) Whether to add gaussian noise to training data.
- *   sigma          (Double) Standard deviation of added gaussian noise.
- *   test           (Boolean) Whether to create testing RDD.
- *   te_samp_fact   (Double) Percentage of training data to use as test data.
+ *   sparkMaster    (String) The master URL.
+ *   outputPath     (String) Directory to save output.
+ *   m              (Int) Number of rows in data matrix.
+ *   n              (Int) Number of columns in data matrix.
+ *   rank           (Int) Underlying rank of data matrix.
+ *   trainSampFact  (Double) Oversampling factor.
+ *   noise          (Boolean) Whether to add gaussian noise to training data.
+ *   sigma          (Double) Standard deviation of added gaussian noise.
+ *   test           (Boolean) Whether to create testing RDD.
+ *   testSampFact   (Double) Percentage of training data to use as test data.
  */
 object MFDataGenerator{
   def main(args: Array[String]) {
-    if (args.length != 10) {
-      println("Usage: MFGenerator " +
-        "<master> <output_dir> <m> <n> <rank> <tr_samp_fact> <noise> <sigma> <test> <te_samp_fact>")
+    if (args.length < 2) {
+      println("Usage: MFDataGenerator " +
+        "<master> <outputDir> [m] [n] [rank] [trainSampFact] [noise] [sigma] [test] [testSampFact]")
       System.exit(1)
     }

@@ -62,51 +62,52 @@ object MFDataGenerator{
     val m: Int = if (args.length > 2) args(2).toInt else 100
     val n: Int = if (args.length > 3) args(3).toInt else 100
     val rank: Int = if (args.length > 4) args(4).toInt else 10
-    val tr_samp_fact: Double = if (args.length > 5) args(5).toDouble else 1.0
+    val trainSampFact: Double = if (args.length > 5) args(5).toDouble else 1.0
     val noise: Boolean = if (args.length > 6) args(6).toBoolean else false
     val sigma: Double = if (args.length > 7) args(7).toDouble else 0.1
     val test: Boolean = if (args.length > 8) args(8).toBoolean else false
-    val te_samp_fact: Double = if (args.length > 9) args(9).toDouble else 0.1
+    val testSampFact: Double = if (args.length > 9) args(9).toDouble else 0.1

     val sc = new SparkContext(sparkMaster, "MFDataGenerator")

-    val A = DoubleMatrix.randn(m,rank)
-    val B = DoubleMatrix.randn(rank,n)
-    val z = 1/(scala.math.sqrt(scala.math.sqrt(rank)))
+    val A = DoubleMatrix.randn(m, rank)
+    val B = DoubleMatrix.randn(rank, n)
+    val z = 1 / (scala.math.sqrt(scala.math.sqrt(rank)))
     A.mmuli(z)
     B.mmuli(z)
     val fullData = A.mmul(B)

-    val df = rank*(m+n-rank)
-    val sampsize = scala.math.min(scala.math.round(tr_samp_fact*df), scala.math.round(.99*m*n)).toInt
+    val df = rank * (m + n - rank)
+    val sampSize = scala.math.min(scala.math.round(trainSampFact * df),
+      scala.math.round(.99 * m * n)).toInt
     val rand = new Random()
-    val mn = m*n
+    val mn = m * n
     val shuffled = rand.shuffle(1 to mn toIterable)

-    val omega = shuffled.slice(0,sampsize)
+    val omega = shuffled.slice(0, sampSize)
     val ordered = omega.sortWith(_ < _).toArray
     val trainData: RDD[(Int, Int, Double)] = sc.parallelize(ordered)
-      .map(x => (fullData.indexRows(x-1),fullData.indexColumns(x-1),fullData.get(x-1)))
+      .map(x => (fullData.indexRows(x - 1), fullData.indexColumns(x - 1), fullData.get(x - 1)))

     // optionally add gaussian noise
-    if(noise){
-      trainData.map(x => (x._1,x._2,x._3+rand.nextGaussian*sigma))
+    if (noise) {
+      trainData.map(x => (x._1, x._2, x._3 + rand.nextGaussian * sigma))
     }

     trainData.map(x => x._1 + "," + x._2 + "," + x._3).saveAsTextFile(outputPath)

     // optionally generate testing data
-    if(test){
-      val test_sampsize = scala.math.min(scala.math.round(sampsize*te_samp_fact),
-        scala.math.round(mn-sampsize)).toInt
-      val test_omega = shuffled.slice(sampsize,sampsize+test_sampsize)
-      val test_ordered = test_omega.sortWith(_ < _).toArray
-      val testData: RDD[(Int, Int, Double)] = sc.parallelize(test_ordered)
-        .map(x => (fullData.indexRows(x-1),fullData.indexColumns(x-1),fullData.get(x-1)))
+    if (test) {
+      val testSampSize = scala.math.min(scala.math.round(sampSize * testSampFact),
+        scala.math.round(mn - sampSize)).toInt
+      val testOmega = shuffled.slice(sampSize, sampSize + testSampSize)
+      val testOrdered = testOmega.sortWith(_ < _).toArray
+      val testData: RDD[(Int, Int, Double)] = sc.parallelize(testOrdered)
+        .map(x => (fullData.indexRows(x - 1), fullData.indexColumns(x - 1), fullData.get(x - 1)))
       testData.map(x => x._1 + "," + x._2 + "," + x._3).saveAsTextFile(outputPath)
     }

-    sc.stop()
+    sc.stop()
   }
 }
\ No newline at end of file
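
As context for the renaming in this diff: trainSampFact only scales how many of the m*n matrix entries the generator reveals as training data, and testSampFact then takes a fraction of that count for the optional test set. A minimal, self-contained sketch of that arithmetic (plain Scala, no Spark; the concrete values below are illustrative and simply mirror the generator's documented defaults, not output taken from the commit):

object MFSampleSizeSketch {
  def main(args: Array[String]): Unit = {
    // Illustrative values matching the generator's documented defaults.
    val m = 100              // rows of the data matrix
    val n = 100              // columns of the data matrix
    val rank = 10            // underlying rank
    val trainSampFact = 1.0  // oversampling factor for training entries
    val testSampFact = 0.1   // fraction of training entries used for testing

    // Degrees of freedom of a rank-r m x n matrix: r * (m + n - r).
    val df = rank * (m + n - rank)                        // 10 * 190 = 1900
    // Training entries: oversample df, capped at 99% of all m*n entries.
    val sampSize = math.min(math.round(trainSampFact * df),
      math.round(0.99 * m * n)).toInt                     // min(1900, 9900) = 1900
    // Test entries: a fraction of the training count, capped by what remains.
    val testSampSize = math.min(math.round(sampSize * testSampFact),
      math.round((m * n - sampSize).toDouble)).toInt      // min(190, 8100) = 190

    println(s"df=$df, sampSize=$sampSize, testSampSize=$testSampSize")
  }
}

With those defaults, a run of the generator needs only the first two positional arguments from the new usage string (<master> and <outputDir>); after this commit the remaining arguments are optional and fall back to the values shown in the hunk at line 62.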