diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
index 88ae83957a708d9d3b4da8b9bba7f6209e2ac2d7..f9a6a09b6270df351b9cac5e7a4811b0506bd88e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
@@ -139,6 +139,7 @@ object JdbcDialects {
   registerDialect(DB2Dialect)
   registerDialect(MsSqlServerDialect)
   registerDialect(DerbyDialect)
+  registerDialect(OracleDialect)
 
 
   /**
@@ -315,3 +316,27 @@ case object DerbyDialect extends JdbcDialect {
 
 }
 
+/**
+ * :: DeveloperApi ::
+ * Default Oracle dialect, mapping a nonspecific numeric type to a general decimal type.
+ */
+@DeveloperApi
+case object OracleDialect extends JdbcDialect {
+  override def canHandle(url: String): Boolean = url.startsWith("jdbc:oracle")
+  override def getCatalystType(
+      sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
+    // Handle NUMBER fields that have no precision/scale in a special way,
+    // because JDBC ResultSetMetaData converts this to precision 0 and scale -127.
+    // For more details, please see
+    // https://github.com/apache/spark/pull/8780#issuecomment-145598968
+    // and
+    // https://github.com/apache/spark/pull/8780#issuecomment-144541760
+    if (sqlType == Types.NUMERIC && size == 0) {
+      // This is sub-optimal as we have to pick a precision/scale in advance whereas the data
+      // in Oracle is allowed to have different precision/scale for each value.
+      Some(DecimalType(DecimalType.MAX_PRECISION, 10))
+    } else {
+      None
+    }
+  }
+}
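A minimal sketch of how the new dialect resolves a column type, assuming the registration above is in place; the JDBC URL is a hypothetical placeholder, while the precision of 0 mirrors what Oracle's driver reports for an unqualified NUMBER column (see the linked PR comments):

    import java.sql.Types
    import org.apache.spark.sql.types.MetadataBuilder

    // JdbcDialects.get returns a dialect whose canHandle accepts the URL.
    val dialect = JdbcDialects.get("jdbc:oracle:thin:@//host:1521/service")
    // Oracle's ResultSetMetaData reports precision 0 / scale -127 for a plain NUMBER.
    val mapped = dialect.getCatalystType(Types.NUMERIC, "NUMBER", 0, new MetadataBuilder())
    // mapped == Some(DecimalType(38, 10)), since DecimalType.MAX_PRECISION is 38.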