Skip to content

Commit

Permalink
Add FunctionCatalog API.
Browse files Browse the repository at this point in the history
  • Loading branch information
rdblue committed May 22, 2019
1 parent 09422f5 commit e520366
Show file tree
Hide file tree
Showing 9 changed files with 486 additions and 4 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.sql.catalog.v2;

import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.types.DataType;

/**
 * Interface for a function that produces a result value by aggregating over multiple input rows.
 * <p>
 * The JVM type of result values produced by this function must be the type used by Spark's
 * InternalRow API for the {@link DataType SQL data type} returned by {@link #resultType()}.
 *
 * @param <S> the JVM type for the aggregation's intermediate state
 * @param <R> the JVM type of result values
 */
public interface AggregateFunction<S, R> extends BoundFunction {

  /**
   * Initialize state for an aggregation.
   * <p>
   * This method is called one or more times for every group of values to initialize intermediate
   * aggregation state. More than one intermediate aggregation state variable may be used when the
   * aggregation is run in parallel tasks.
   * <p>
   * The object returned may be passed to {@link #update(Object, InternalRow)} and
   * {@link #produceResult(Object)}. Implementations that return null must support null state
   * passed into all other methods.
   *
   * @return a state instance, or null if null state is supported by this implementation
   */
  S newAggregationState();

  /**
   * Update the aggregation state with a new row.
   * <p>
   * This is called for each row in a group to update an intermediate aggregation state.
   *
   * @param state intermediate aggregation state (may be null if
   *              {@link #newAggregationState()} returned null)
   * @param input an input row
   * @return updated aggregation state
   */
  S update(S state, InternalRow input);

  /**
   * Produce the aggregation result based on intermediate state.
   *
   * @param state intermediate aggregation state
   * @return a result value; its JVM type must match {@link #resultType()}
   */
  R produceResult(S state);

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.sql.catalog.v2;

import java.io.Serializable;

/**
 * Interface for an aggregate function that can be partially applied in parallel tasks, then merged
 * to produce a final result.
 * <p>
 * Intermediate aggregation state must be {@link Serializable} so that state produced by parallel
 * tasks can be sent to a single executor and merged to produce a final result.
 *
 * @param <S> the JVM type for the aggregation's intermediate state; must be {@link Serializable}
 * @param <R> the JVM type of result values
 */
public interface AssociativeAggregateFunction<S extends Serializable, R>
    extends AggregateFunction<S, R> {

  /**
   * Merge two partial aggregation states.
   * <p>
   * This is called to merge intermediate aggregation states that were produced by parallel tasks.
   * As the interface name implies, the merge operation must be associative so that partial states
   * can be combined in any grouping.
   *
   * @param leftState intermediate aggregation state
   * @param rightState intermediate aggregation state
   * @return combined aggregation state
   */
  S merge(S leftState, S rightState);

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.sql.catalog.v2;

import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.IntegerType;

import java.io.Serializable;

/**
 * Represents a function that is bound to an input type.
 * <p>
 * Extends {@link Serializable} so that bound functions can be shipped across the cluster.
 */
public interface BoundFunction extends Function, Serializable {

  /**
   * Returns the {@link DataType data type} of values produced by this function.
   * <p>
   * For example, a "plus" function may return {@link IntegerType} when it is bound to arguments
   * that are also {@link IntegerType}.
   *
   * @return a data type for values produced by this function
   */
  DataType resultType();

  /**
   * Returns whether values produced by this function may be null.
   * <p>
   * For example, a "plus" function may return false when it is bound to arguments that are always
   * non-null, but true when either argument may be null.
   *
   * @return true if values produced by this function may be null, false otherwise
   */
  default boolean isResultNullable() {
    // Defaults to true (nullable) as the conservative choice for implementations
    // that do not override this method.
    return true;
  }

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.sql.catalog.v2;

import java.io.Serializable;

/**
 * Base class for user-defined functions.
 * <p>
 * Extends {@link Serializable} so that function instances can be shipped across the cluster.
 */
public interface Function extends Serializable {

  /**
   * A name to identify this function. Implementations should provide a meaningful name, like the
   * database and function name from the catalog.
   *
   * @return a name that identifies this function
   */
  String name();

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.sql.catalog.v2;

import org.apache.spark.sql.catalyst.analysis.NoSuchFunctionException;
import org.apache.spark.sql.catalyst.analysis.NoSuchNamespaceException;

/**
 * Catalog methods for working with Functions.
 */
public interface FunctionCatalog extends CatalogPlugin {

  /**
   * List the functions in a namespace from the catalog.
   *
   * @param namespace a multi-part namespace
   * @return an array of Identifiers for functions
   * @throws NoSuchNamespaceException If the namespace does not exist (optional: implementations
   *                                  may instead return an empty array).
   */
  Identifier[] listFunctions(String[] namespace) throws NoSuchNamespaceException;

  /**
   * Load a function by {@link Identifier identifier} from the catalog.
   *
   * @param ident a function identifier
   * @return an unbound function instance
   * @throws NoSuchFunctionException If the function doesn't exist
   */
  UnboundFunction loadFunction(Identifier ident) throws NoSuchFunctionException;

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.sql.catalog.v2;

import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.types.DataType;

/**
 * Interface for a function that produces a result value for each input row.
 * <p>
 * The JVM type of result values produced by this function must be the type used by Spark's
 * InternalRow API for the {@link DataType SQL data type} returned by {@link #resultType()}.
 *
 * @param <R> the JVM type of result values
 */
public interface ScalarFunction<R> extends BoundFunction {

  /**
   * Applies the function to an input row to produce a value.
   *
   * @param input an input row
   * @return a result value; its JVM type must match {@link #resultType()}
   */
  R produceResult(InternalRow input);

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.sql.catalog.v2;

import org.apache.spark.sql.types.StructType;

/**
 * Represents a user-defined function that is not bound to input types.
 */
public interface UnboundFunction extends Function {

  /**
   * Bind this function to an input type.
   * <p>
   * If the input type is not supported, implementations must throw
   * {@link UnsupportedOperationException}.
   * <p>
   * For example, a "length" function that only supports a single string argument should throw
   * UnsupportedOperationException if the struct has more than one field or if that field is not a
   * string, and it may optionally throw if the field is nullable.
   *
   * @param inputType a struct type for inputs that will be passed to the bound function
   * @return a function that can process rows with the given input type
   * @throws UnsupportedOperationException If the function cannot be applied to the input type
   */
  BoundFunction bind(StructType inputType);

}
Original file line number Diff line number Diff line change
Expand Up @@ -56,10 +56,22 @@ class NoSuchPartitionException(
/** Thrown when a permanent function cannot be found in the given database. */
class NoSuchPermanentFunctionException(db: String, func: String)
  extends AnalysisException(s"Function '$func' not found in database '$db'")

class NoSuchFunctionException(db: String, func: String, cause: Option[Throwable] = None)
extends AnalysisException(
s"Undefined function: '$func'. This function is neither a registered temporary function nor " +
s"a permanent function registered in the database '$db'.", cause = cause)
/**
 * Thrown when a function cannot be resolved.
 *
 * The primary constructor takes a pre-built message; the auxiliary constructors build the
 * message from either a database/function-name pair or a catalog v2 [[Identifier]].
 */
class NoSuchFunctionException(
    msg: String,
    cause: Option[Throwable] = None) extends AnalysisException(msg, cause = cause) {

  // Imported for the `quoted` syntax on multi-part identifiers used below.
  import org.apache.spark.sql.catalog.v2.CatalogV2Implicits._

  /** Builds the error message from a database name and a function name. */
  def this(db: String, func: String, cause: Option[Throwable] = None) = {
    this(s"Undefined function: '$func'. " +
      s"This function is neither a registered temporary function nor " +
      s"a permanent function registered in the database '$db'.", cause = cause)
  }

  /** Builds the error message from a catalog v2 identifier. */
  def this(identifier: Identifier, cause: Option[Throwable]) = {
    this(s"Undefined function: ${identifier.quoted}", cause = cause)
  }
}

class NoSuchPartitionsException(db: String, table: String, specs: Seq[TablePartitionSpec])
extends AnalysisException(
Expand Down
Loading

0 comments on commit e520366

Please sign in to comment.