V2connector #1241

Open
wants to merge 17 commits into base: master
29 changes: 29 additions & 0 deletions build.gradle
@@ -818,9 +818,13 @@ task product(type: Zip) {
dependsOn ":snappy-spark:snappy-spark-assembly_${scalaBinaryVersion}:sparkProduct"
dependsOn ':snappy-launcher:jar'
dependsOn ':jdbcJar'
// Commented "v2connector" in order to exclude from build and test process.
// uncomment if we decide to include it.
// dependsOn ":snappy-v2connector_${scalaBinaryVersion}:jar"

def clusterProject = project(":snappy-cluster_${scalaBinaryVersion}")
def launcherProject = project(':snappy-launcher')
// def v2connectorProject = project(":snappy-v2connector_${scalaBinaryVersion}")
def targetProject = clusterProject

if (isEnterpriseProduct) {
@@ -885,6 +889,13 @@ task product(type: Zip) {
include launcherProject.jar.archiveName
}

//Copying the V2Connector jar
// copy {
// from v2connectorProject.jar.destinationDir
// into "${snappyProductDir}/jars"
// include v2connectorProject.jar.archiveName
// }

// create the RELEASE file
def releaseFile = file("${snappyProductDir}/RELEASE")
String buildFlags = ''
@@ -1162,8 +1173,26 @@ task jdbcJar {
}
}

// Uncomment for v2Connector project build.
//task v2ConnectorJar {
// dependsOn ":snappy-v2connector_${scalaBinaryVersion}:shadowJar"
//
// doLast {
// def v2ConnectorProject = project(":snappy-v2connector_${scalaBinaryVersion}")
// String v2ConnectorName = "snappydata-v2connector_${scalaBinaryVersion}-${version}.jar"
// // copy the snappy-v2connector shadow jar into distributions
// copy {
// from v2ConnectorProject.shadowJar.destinationDir
// into "${rootProject.buildDir}/distributions"
// include v2ConnectorProject.shadowJar.archiveName
// rename { filename -> v2ConnectorName }
// }
// }
//}

task copyShadowJars {
dependsOn jdbcJar
// dependsOn v2ConnectorJar
dependsOn ":snappy-core_${scalaBinaryVersion}:shadowJar"

doLast {
@@ -310,7 +310,7 @@ class SnappyUnifiedMemoryManagerDUnitTest(s: String) extends ClusterManagerTestB
stmt.execute(s"CALL SYS.SET_BUCKETS_FOR_LOCAL_EXECUTION('$columnTable', " +
s"'${(0 until numBuckets).mkString(",")}', -1)")
val rs = stmt.executeQuery(s"CALL SYS.COLUMN_TABLE_SCAN('$columnTable', " +
s"'${(1 to numColumns).mkString(",")}', null)")
s"'${(1 to numColumns).mkString(",")}', null, 1)")
var n = 0
while (rs.next()) {
n += 1
@@ -37,7 +37,8 @@ final class SmartConnectorRDDHelper {
def prepareScan(conn: Connection, txId: String, columnTable: String, projection: Array[Int],
serializedFilters: Array[Byte], partition: SmartExecutorBucketPartition,
catalogVersion: Long): (PreparedStatement, ResultSet) = {
val pstmt = conn.prepareStatement("call sys.COLUMN_TABLE_SCAN(?, ?, ?)")

val pstmt = conn.prepareStatement("call sys.COLUMN_TABLE_SCAN(?, ?, ?, 1)")
pstmt.setString(1, columnTable)
pstmt.setString(2, projection.mkString(","))
// serialize the filters
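For reference, the extra fourth argument added to SYS.COLUMN_TABLE_SCAN in these two hunks appears to select how the pushed-down filters were serialized; the embedded and smart-connector callers pass the literal 1 (Kryo), matching the useKryoSerializer flag added to StoreCallbacksImpl below. A minimal sketch of invoking the updated procedure over plain JDBC, mirroring the DUnit test change above; the connection URL and table name are placeholders rather than part of this PR:

import java.sql.DriverManager

object ColumnTableScanSketch {
  def main(args: Array[String]): Unit = {
    // placeholder client URL; any reachable SnappyData locator/server endpoint
    val conn = DriverManager.getConnection("jdbc:snappydata://localhost:1527/")
    try {
      val stmt = conn.createStatement()
      // arguments: column table name, comma-separated 1-based column positions,
      // serialized filters (null = no filter push-down), and the new serializer
      // flag introduced by this change (1 = Kryo-serialized filters)
      val rs = stmt.executeQuery(
        "CALL SYS.COLUMN_TABLE_SCAN('APP.TEST_TABLE', '1,2,3', null, 1)")
      var n = 0
      while (rs.next()) n += 1
      println(s"fetched $n column batch entries")
      rs.close()
      stmt.close()
    } finally conn.close()
  }
}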
7 changes: 0 additions & 7 deletions core/src/main/scala/org/apache/spark/sql/SnappyContext.scala
@@ -1325,10 +1325,3 @@ case class LocalMode(override val sc: SparkContext,
override val url: String) extends ClusterMode {
override val description: String = "Local mode"
}

class TableNotFoundException(schema: String, table: String, cause: Option[Throwable] = None)
extends AnalysisException(s"Table or view '$table' not found in schema '$schema'",
cause = cause)

class PolicyNotFoundException(schema: String, name: String, cause: Option[Throwable] = None)
extends AnalysisException(s"Policy '$name' not found in schema '$schema'", cause = cause)
@@ -839,11 +839,6 @@ object Utils {
TASKCONTEXT_FUNCTION
}

def executorsListener(sc: SparkContext): Option[ExecutorsListener] = sc.ui match {
case Some(ui) => Some(ui.executorsListener)
case _ => None
}

def getActiveSession: Option[SparkSession] = SparkSession.getActiveSession

def sqlInternal(snappy: SnappySession, sqlText: String): CachedDataFrame =
@@ -16,30 +16,21 @@
*/
package org.apache.spark.sql.execution.columnar

import java.nio.{ByteBuffer, ByteOrder}
import java.sql.{Connection, ResultSet, Statement}
import java.nio.ByteBuffer
import java.util.function.BiFunction

import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions
import scala.util.control.NonFatal

import com.gemstone.gemfire.cache.EntryDestroyedException
import com.gemstone.gemfire.internal.cache.{BucketRegion, GemFireCacheImpl, LocalRegion, NonLocalRegionEntry, PartitionedRegion, RegionEntry, TXStateInterface}
import com.gemstone.gemfire.internal.shared.{BufferAllocator, FetchRequest}
import com.gemstone.gemfire.internal.cache._
import com.gemstone.gemfire.internal.shared.FetchRequest
import com.pivotal.gemfirexd.internal.engine.store.GemFireContainer
import com.pivotal.gemfirexd.internal.impl.jdbc.EmbedConnection
import io.snappydata.thrift.common.BufferedBlob
import org.eclipse.collections.api.block.procedure.Procedure
import org.eclipse.collections.impl.map.mutable.primitive.IntObjectHashMap

import org.apache.spark.memory.MemoryManagerCallback.releaseExecutionMemory
import org.apache.spark.sql.execution.columnar.encoding.{ColumnDecoder, ColumnDeleteDecoder, ColumnEncoding, UpdatedColumnDecoder, UpdatedColumnDecoderBase}
import org.apache.spark.TaskContext
import org.apache.spark.sql.execution.columnar.encoding._
import org.apache.spark.sql.execution.columnar.impl._
import org.apache.spark.sql.execution.row.PRValuesIterator
import org.apache.spark.sql.store.CompressionUtils
import org.apache.spark.sql.types.StructField
import org.apache.spark.{Logging, TaskContext, TaskContextImpl, TaskKilledException}

import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions

case class ColumnBatch(numRows: Int, buffers: Array[ByteBuffer],
statsData: Array[Byte], deltaIndexes: Array[Int])
@@ -22,7 +22,6 @@ import java.util.Collections

import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer

import com.gemstone.gemfire.cache.{EntryDestroyedException, RegionDestroyedException}
import com.gemstone.gemfire.internal.cache.lru.LRUEntry
import com.gemstone.gemfire.internal.cache.persistence.query.CloseableIterator
@@ -43,16 +42,15 @@ import com.pivotal.gemfirexd.internal.impl.jdbc.{EmbedConnection, Util}
import com.pivotal.gemfirexd.internal.impl.sql.execute.PrivilegeInfo
import com.pivotal.gemfirexd.internal.shared.common.reference.SQLState
import io.snappydata.SnappyTableStatsProviderService
import io.snappydata.sql.catalog.{CatalogObjectType, SnappyExternalCatalog}

import io.snappydata.sql.catalog.{CatalogObjectType, SmartConnectorHelper, SnappyExternalCatalog}
import org.apache.spark.Logging
import org.apache.spark.memory.{MemoryManagerCallback, MemoryMode}
import org.apache.spark.serializer.KryoSerializerPool
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.expressions.codegen.{CodeAndComment, CodeFormatter, CodeGenerator, CodegenContext}
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression, Literal, TokenLiteral, UnsafeRow}
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, expressions}
import org.apache.spark.sql.collection.{ToolsCallbackInit, Utils}
import org.apache.spark.sql.collection.{SharedUtils, ToolsCallbackInit, Utils}
import org.apache.spark.sql.execution.ConnectionPool
import org.apache.spark.sql.execution.columnar.encoding.ColumnStatsSchema
import org.apache.spark.sql.execution.columnar.{ColumnBatchCreator, ColumnBatchIterator, ColumnTableScan, ExternalStore, ExternalStoreUtils}
@@ -129,7 +127,7 @@ object StoreCallbacksImpl extends StoreCallbacks with Logging with Serializable
// add weightage column for sample tables if required
var schema = catalogEntry.schema.asInstanceOf[StructType]
if (catalogEntry.tableType == CatalogObjectType.Sample.toString &&
schema(schema.length - 1).name != Utils.WEIGHTAGE_COLUMN_NAME) {
schema(schema.length - 1).name != Utils.WEIGHTAGE_COLUMN_NAME) {
schema = schema.add(Utils.WEIGHTAGE_COLUMN_NAME,
LongType, nullable = false)
}
@@ -197,11 +195,18 @@ object StoreCallbacksImpl extends StoreCallbacks with Logging with Serializable
@throws(classOf[SQLException])
override def columnTableScan(columnTable: String,
projection: Array[Int], serializedFilters: Array[Byte],
bucketIds: java.util.Set[Integer]): CloseableIterator[ColumnTableEntry] = {
bucketIds: java.util.Set[Integer],
useKryoSerializer: Boolean): CloseableIterator[ColumnTableEntry] = {
// deserialize the filters
val batchFilters = if ((serializedFilters ne null) && serializedFilters.length > 0) {
KryoSerializerPool.deserialize(serializedFilters, 0, serializedFilters.length,
(kryo, in) => kryo.readObject(in, classOf[Array[Filter]])).toSeq
if (useKryoSerializer) {
KryoSerializerPool.deserialize(serializedFilters, 0, serializedFilters.length,
(kryo, in) => kryo.readObject(in, classOf[Array[Filter]])).toSeq
} else {
// java serializer
val v = SharedUtils.deserialize(serializedFilters).asInstanceOf[Array[Filter]]
v.toSeq
}
} else null
val (region, schemaAttrs, batchFilterExprs) = try {
val lr = Misc.getRegionForTable(columnTable, true).asInstanceOf[LocalRegion]
@@ -372,7 +377,8 @@ object StoreCallbacksImpl extends StoreCallbacks with Logging with Serializable
private def attr(a: String, schema: Seq[AttributeReference]): AttributeReference = {
// filter passed should have same case as in schema and not be qualified which
// should be true since these have been created from resolved Expression by sender
schema.find(_.name == a) match {
// TODO: [shirish] converted to uppercase to make v2 connector work
schema.find(x => x.name == a || x.name == a.toUpperCase) match {
case Some(attr) => attr
case _ => throw Utils.analysisException(s"Could not find $a in ${schema.mkString(", ")}")
}
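The branch added to columnTableScan above implies that, when useKryoSerializer is false, the filter bytes are expected to be plain JDK-serialized, since they are read back with SharedUtils.deserialize. A minimal client-side sketch under that assumption; the helper name and the notion that a V2 connector caller takes this path are illustrative, as only the server-side deserialization appears in this diff:

import java.io.{ByteArrayOutputStream, ObjectOutputStream}

import org.apache.spark.sql.sources.{Filter, GreaterThan}

object FilterSerializationSketch {
  // Java-serialize an Array[Filter] so that SharedUtils.deserialize(...) on the
  // server can read it back, as in the non-Kryo branch of columnTableScan
  def serializeFilters(filters: Array[Filter]): Array[Byte] = {
    val bos = new ByteArrayOutputStream()
    val oos = new ObjectOutputStream(bos)
    try oos.writeObject(filters) finally oos.close()
    bos.toByteArray
  }

  def main(args: Array[String]): Unit = {
    // hypothetical pushed-down filter; Spark's source filters are case classes
    // and therefore JDK-serializable
    val bytes = serializeFilters(Array(GreaterThan("COL1", 100)))
    println(s"serialized ${bytes.length} bytes of filters")
  }
}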