diff --git a/core/src/main/scala/org/apache/spark/storage/DiskStore.scala b/core/src/main/scala/org/apache/spark/storage/DiskStore.scala
index 8dadf6794039e725ce674ef5511fa541cbb217bb..61ef5ff168791ed615c67aeb88ea79c100b84548 100644
--- a/core/src/main/scala/org/apache/spark/storage/DiskStore.scala
+++ b/core/src/main/scala/org/apache/spark/storage/DiskStore.scala
@@ -31,7 +31,8 @@ import org.apache.spark.util.Utils
 private[spark] class DiskStore(blockManager: BlockManager, diskManager: DiskBlockManager)
   extends BlockStore(blockManager) with Logging {
 
-  val minMemoryMapBytes = blockManager.conf.getLong("spark.storage.memoryMapThreshold", 2 * 4096L)
+  val minMemoryMapBytes = blockManager.conf.getLong(
+    "spark.storage.memoryMapThreshold", 2 * 1024L * 1024L)
 
   override def getSize(blockId: BlockId): Long = {
     diskManager.getFile(blockId.name).length
diff --git a/docs/configuration.md b/docs/configuration.md
index 2add48569beced0a452055c053a9325d9768198a..f292bfbb7dcd65c4ea9799d34e29499e0928e92c 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -678,7 +678,7 @@ Apart from these, the following properties are also available, and may be useful
 </tr>
 <tr>
   <td><code>spark.storage.memoryMapThreshold</code></td>
-  <td>8192</td>
+  <td>2097152</td>
   <td>
     Size of a block, in bytes, above which Spark memory maps when reading a block from disk.
     This prevents Spark from memory mapping very small blocks. In general, memory
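
The new default is 2 MB (2 * 1024 * 1024 = 2097152 bytes), up from the previous 8 KB (2 * 4096). Applications that prefer a different threshold can still override `spark.storage.memoryMapThreshold` through `SparkConf`. A minimal sketch of doing so, assuming a standalone driver program; the 8 MB value here is purely illustrative:

```scala
import org.apache.spark.{SparkConf, SparkContext}

// Override the 2 MB default for this application.
// The value is read with conf.getLong, so it must be a plain byte count.
val conf = new SparkConf()
  .setAppName("memory-map-threshold-example")           // hypothetical app name
  .set("spark.storage.memoryMapThreshold", "8388608")   // 8 MB, illustrative only

val sc = new SparkContext(conf)
```

Blocks smaller than the configured threshold are read into a regular buffer instead of being memory-mapped, which avoids the overhead of mapping many very small files.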