diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index 2173aba763f8abab90cf0ede4b6175452e726ec5..e72a0be148e1f5118e841be5930a6f67a4625eb0 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -1355,7 +1355,7 @@ Thrift JDBC server also supports sending thrift RPC messages over HTTP transport
 Use the following setting to enable HTTP mode as system property or in `hive-site.xml` file in `conf/`:
 
     hive.server2.transport.mode - Set this to value: http
-    hive.server2.thrift.http.port - HTTP port number fo listen on; default is 10001
+    hive.server2.thrift.http.port - HTTP port number to listen on; default is 10001
     hive.server2.http.endpoint - HTTP endpoint; default is cliservice
 
 To test, use beeline to connect to the JDBC/ODBC server in http mode with:
diff --git a/examples/src/main/python/sql/basic.py b/examples/src/main/python/sql/basic.py
index ebcf66995b4777bd696eb471d6003c82cf663932..c07fa8f2752b35f299b15c3a79697147005f6c3e 100644
--- a/examples/src/main/python/sql/basic.py
+++ b/examples/src/main/python/sql/basic.py
@@ -187,9 +187,6 @@ def programmatic_schema_example(spark):
     # Creates a temporary view using the DataFrame
     schemaPeople.createOrReplaceTempView("people")
 
-    # Creates a temporary view using the DataFrame
-    schemaPeople.createOrReplaceTempView("people")
-
     # SQL can be run over DataFrames that have been registered as a table.
     results = spark.sql("SELECT name FROM people")
 
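
For context (not part of the patch above): the duplicated createOrReplaceTempView call removed from basic.py is a behavior-neutral repeat, since re-registering the same DataFrame under the same view name simply replaces the view with identical contents. Below is a minimal, self-contained PySpark sketch of the temp-view pattern the example exercises; the SparkSession setup and sample rows are illustrative assumptions, not taken from the patched file.

    # Minimal sketch of the temp-view pattern (illustrative; not from the patch).
    from pyspark.sql import Row, SparkSession

    spark = SparkSession.builder.appName("TempViewSketch").getOrCreate()

    # Hypothetical sample data; the real example builds its schema programmatically.
    schemaPeople = spark.createDataFrame([Row(name="Alice", age=30), Row(name="Bob", age=25)])

    # Register the DataFrame as a temporary view once; a second identical call,
    # like the duplicate removed above, would just replace the view with itself.
    schemaPeople.createOrReplaceTempView("people")

    # SQL can be run over DataFrames that have been registered as a table.
    results = spark.sql("SELECT name FROM people")
    results.show()

    spark.stop()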