diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index 235f5ecc40c9fe1d975b0eb46a2d1c6fd26593c3..2dd1ab6ef3de1639499c6883caeb78e8e81d0e5c 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -1410,7 +1410,7 @@ Thrift JDBC server also supports sending thrift RPC messages over HTTP transport
 Use the following setting to enable HTTP mode as system property or in `hive-site.xml` file in `conf/`:

     hive.server2.transport.mode - Set this to value: http
-    hive.server2.thrift.http.port - HTTP port number fo listen on; default is 10001
+    hive.server2.thrift.http.port - HTTP port number to listen on; default is 10001
     hive.server2.http.endpoint - HTTP endpoint; default is cliservice

 To test, use beeline to connect to the JDBC/ODBC server in http mode with:
diff --git a/examples/src/main/python/sql/basic.py b/examples/src/main/python/sql/basic.py
index ebcf66995b4777bd696eb471d6003c82cf663932..c07fa8f2752b35f299b15c3a79697147005f6c3e 100644
--- a/examples/src/main/python/sql/basic.py
+++ b/examples/src/main/python/sql/basic.py
@@ -187,9 +187,6 @@ def programmatic_schema_example(spark):
     # Creates a temporary view using the DataFrame
     schemaPeople.createOrReplaceTempView("people")

-    # Creates a temporary view using the DataFrame
-    schemaPeople.createOrReplaceTempView("people")
-
     # SQL can be run over DataFrames that have been registered as a table.
     results = spark.sql("SELECT name FROM people")
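
For reference, below is a minimal, self-contained sketch of the de-duplicated flow in `programmatic_schema_example`: the DataFrame is registered as a temporary view exactly once and then queried with SQL. The app name and the inline rows are illustrative assumptions, not taken from the patch, which reads its data from a text file instead.

```python
# Hypothetical standalone sketch (not part of the patch): register a DataFrame
# as a temporary view once, then run SQL over it, mirroring the corrected
# flow in examples/src/main/python/sql/basic.py.
from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.appName("temp-view-sketch").getOrCreate()

# Build a small DataFrame in place so the sketch does not depend on an input file.
schemaPeople = spark.createDataFrame([
    Row(name="Justin", age=19),
    Row(name="Michael", age=29),
])

# Creates a temporary view using the DataFrame (registering it once is enough).
schemaPeople.createOrReplaceTempView("people")

# SQL can be run over DataFrames that have been registered as a table.
results = spark.sql("SELECT name FROM people")
results.show()

spark.stop()
```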