diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala b/core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala
index 2987dc04494a512e86d520404aa648b99fbce699..f0e43fbf709761076eb7531bab39aaa49eab6d01 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/ExecutorTable.scala
@@ -71,19 +71,19 @@ private[ui] class ExecutorTable(stageId: Int, stageAttemptId: Int, parent: JobPr
           <tr>
             <td>{k}</td>
             <td>{executorIdToAddress.getOrElse(k, "CANNOT FIND ADDRESS")}</td>
-            <td sorttable_customekey={v.taskTime.toString}>{UIUtils.formatDuration(v.taskTime)}</td>
+            <td sorttable_customkey={v.taskTime.toString}>{UIUtils.formatDuration(v.taskTime)}</td>
             <td>{v.failedTasks + v.succeededTasks}</td>
             <td>{v.failedTasks}</td>
             <td>{v.succeededTasks}</td>
-            <td sorttable_customekey={v.inputBytes.toString}>
+            <td sorttable_customkey={v.inputBytes.toString}>
               {Utils.bytesToString(v.inputBytes)}</td>
-            <td sorttable_customekey={v.shuffleRead.toString}>
+            <td sorttable_customkey={v.shuffleRead.toString}>
               {Utils.bytesToString(v.shuffleRead)}</td>
-            <td sorttable_customekey={v.shuffleWrite.toString}>
+            <td sorttable_customkey={v.shuffleWrite.toString}>
               {Utils.bytesToString(v.shuffleWrite)}</td>
-            <td sorttable_customekey={v.memoryBytesSpilled.toString}>
+            <td sorttable_customkey={v.memoryBytesSpilled.toString}>
               {Utils.bytesToString(v.memoryBytesSpilled)}</td>
-            <td sorttable_customekey={v.diskBytesSpilled.toString}>
+            <td sorttable_customkey={v.diskBytesSpilled.toString}>
               {Utils.bytesToString(v.diskBytesSpilled)}</td>
           </tr>
         }
diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/StageTable.scala b/core/src/main/scala/org/apache/spark/ui/jobs/StageTable.scala
index 2e67310594784f06b3587184977114afadce1278..4ee7f08ab47a24fb68380f3665b7ab31dd82b5d8 100644
--- a/core/src/main/scala/org/apache/spark/ui/jobs/StageTable.scala
+++ b/core/src/main/scala/org/apache/spark/ui/jobs/StageTable.scala
@@ -176,9 +176,9 @@ private[ui] class StageTableBase(
         {makeProgressBar(stageData.numActiveTasks, stageData.completedIndices.size,
           stageData.numFailedTasks, s.numTasks)}
       </td>
-      <td sorttable_customekey={inputRead.toString}>{inputReadWithUnit}</td>
-      <td sorttable_customekey={shuffleRead.toString}>{shuffleReadWithUnit}</td>
-      <td sorttable_customekey={shuffleWrite.toString}>{shuffleWriteWithUnit}</td>
+      <td sorttable_customkey={inputRead.toString}>{inputReadWithUnit}</td>
+      <td sorttable_customkey={shuffleRead.toString}>{shuffleReadWithUnit}</td>
+      <td sorttable_customkey={shuffleWrite.toString}>{shuffleWriteWithUnit}</td>
   }
 
   /** Render an HTML row that represents a stage */
diff --git a/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala b/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
index 716591c9ed4498c9e5fd40c802b54d6a60de6b70..83489ca0679ee92e8c62aac72b20b7dbdd6dc9b0 100644
--- a/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala
@@ -58,9 +58,9 @@ private[ui] class StoragePage(parent: StorageTab) extends WebUIPage("") {
       </td>
       <td>{rdd.numCachedPartitions}</td>
       <td>{"%.0f%%".format(rdd.numCachedPartitions * 100.0 / rdd.numPartitions)}</td>
-      <td sorttable_customekey={rdd.memSize.toString}>{Utils.bytesToString(rdd.memSize)}</td>
-      <td sorttable_customekey={rdd.tachyonSize.toString}>{Utils.bytesToString(rdd.tachyonSize)}</td>
-      <td sorttable_customekey={rdd.diskSize.toString} >{Utils.bytesToString(rdd.diskSize)}</td>
+      <td sorttable_customkey={rdd.memSize.toString}>{Utils.bytesToString(rdd.memSize)}</td>
+      <td sorttable_customkey={rdd.tachyonSize.toString}>{Utils.bytesToString(rdd.tachyonSize)}</td>
+      <td sorttable_customkey={rdd.diskSize.toString} >{Utils.bytesToString(rdd.diskSize)}</td>
     </tr>
     // scalastyle:on
   }