Merging PR_218 openai_rev package with new streamlit chat app
This commit is contained in:
@@ -0,0 +1,60 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// NOTE: API is EXPERIMENTAL and will change without going through a
|
||||
// deprecation cycle
|
||||
|
||||
#pragma once
|
||||
|
||||
/// \defgroup compute-concrete-options Concrete option classes for compute functions
|
||||
/// @{
|
||||
/// @}
|
||||
|
||||
#include "arrow/compute/api_aggregate.h" // IWYU pragma: export
|
||||
#include "arrow/compute/api_scalar.h" // IWYU pragma: export
|
||||
#include "arrow/compute/api_vector.h" // IWYU pragma: export
|
||||
#include "arrow/compute/cast.h" // IWYU pragma: export
|
||||
#include "arrow/compute/function.h" // IWYU pragma: export
|
||||
#include "arrow/compute/kernel.h" // IWYU pragma: export
|
||||
#include "arrow/compute/registry.h" // IWYU pragma: export
|
||||
#include "arrow/datum.h" // IWYU pragma: export
|
||||
|
||||
/// \defgroup execnode-expressions Utilities for creating expressions to
|
||||
/// use in execution plans
|
||||
/// @{
|
||||
/// @}
|
||||
|
||||
#include "arrow/compute/exec/expression.h" // IWYU pragma: export
|
||||
|
||||
/// \defgroup execnode-options Concrete option classes for ExecNode options
|
||||
/// @{
|
||||
/// @}
|
||||
|
||||
#include "arrow/compute/exec/options.h" // IWYU pragma: export
|
||||
|
||||
/// \defgroup execnode-row Utilities for working with data in a row-major format
|
||||
/// @{
|
||||
/// @}
|
||||
|
||||
#include "arrow/compute/row/grouper.h" // IWYU pragma: export
|
||||
|
||||
/// \defgroup execnode-components Components associated with ExecNode
|
||||
/// @{
|
||||
/// @}
|
||||
|
||||
#include "arrow/compute/exec.h" // IWYU pragma: export
|
||||
#include "arrow/compute/exec/exec_plan.h" // IWYU pragma: export
|
||||
@@ -0,0 +1,412 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// Eager evaluation convenience APIs for invoking common functions, including
|
||||
// necessary memory allocations
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "arrow/compute/function.h"
|
||||
#include "arrow/datum.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/util/macros.h"
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {
|
||||
|
||||
class Array;
|
||||
|
||||
namespace compute {
|
||||
|
||||
class ExecContext;
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
// Aggregate functions
|
||||
|
||||
/// \addtogroup compute-concrete-options
|
||||
/// @{
|
||||
|
||||
/// \brief Control general scalar aggregate kernel behavior
|
||||
///
|
||||
/// By default, null values are ignored (skip_nulls = true).
|
||||
/// \brief Control general scalar aggregate kernel behavior
///
/// By default, null values are ignored (skip_nulls = true).
class ARROW_EXPORT ScalarAggregateOptions : public FunctionOptions {
 public:
  explicit ScalarAggregateOptions(bool skip_nulls = true, uint32_t min_count = 1);
  static constexpr char const kTypeName[] = "ScalarAggregateOptions";
  static ScalarAggregateOptions Defaults() { return ScalarAggregateOptions{}; }

  /// If true (the default), nulls are skipped; otherwise any null input
  /// causes a null output.
  bool skip_nulls;
  /// Emit null when fewer than this many non-null values are observed.
  uint32_t min_count;
};
|
||||
|
||||
/// \brief Control count aggregate kernel behavior.
|
||||
///
|
||||
/// By default, only non-null values are counted.
|
||||
/// \brief Control count aggregate kernel behavior.
///
/// By default, only non-null values are counted.
class ARROW_EXPORT CountOptions : public FunctionOptions {
 public:
  /// Which values are included in the count.
  enum CountMode {
    /// Count only non-null values.
    ONLY_VALID = 0,
    /// Count only null values.
    ONLY_NULL,
    /// Count both non-null and null values.
    ALL,
  };
  explicit CountOptions(CountMode mode = CountMode::ONLY_VALID);
  static constexpr char const kTypeName[] = "CountOptions";
  static CountOptions Defaults() { return CountOptions{}; }

  /// The counting mode in effect.
  CountMode mode;
};
|
||||
|
||||
/// \brief Control Mode kernel behavior
|
||||
///
|
||||
/// Returns top-n common values and counts.
|
||||
/// By default, returns the most common value and count.
|
||||
/// \brief Control Mode kernel behavior
///
/// Returns top-n common values and counts.
/// By default, returns the most common value and count.
class ARROW_EXPORT ModeOptions : public FunctionOptions {
 public:
  explicit ModeOptions(int64_t n = 1, bool skip_nulls = true, uint32_t min_count = 0);
  static constexpr char const kTypeName[] = "ModeOptions";
  static ModeOptions Defaults() { return ModeOptions{}; }

  /// Number of most-common values to return (default 1).
  int64_t n = 1;
  /// If true (the default), nulls are skipped; otherwise any null input
  /// causes a null output.
  bool skip_nulls;
  /// Emit null when fewer than this many non-null values are observed.
  uint32_t min_count;
};
|
||||
|
||||
/// \brief Control Delta Degrees of Freedom (ddof) of Variance and Stddev kernel
|
||||
///
|
||||
/// The divisor used in calculations is N - ddof, where N is the number of elements.
|
||||
/// By default, ddof is zero, and population variance or stddev is returned.
|
||||
/// \brief Control Delta Degrees of Freedom (ddof) of Variance and Stddev kernel
///
/// The divisor used in calculations is N - ddof, where N is the number of elements.
/// By default, ddof is zero, and population variance or stddev is returned.
class ARROW_EXPORT VarianceOptions : public FunctionOptions {
 public:
  explicit VarianceOptions(int ddof = 0, bool skip_nulls = true, uint32_t min_count = 0);
  static constexpr char const kTypeName[] = "VarianceOptions";
  static VarianceOptions Defaults() { return VarianceOptions{}; }

  /// Delta degrees of freedom (0 = population variance/stddev).
  int ddof = 0;
  /// If true (the default), nulls are skipped; otherwise any null input
  /// causes a null output.
  bool skip_nulls;
  /// Emit null when fewer than this many non-null values are observed.
  uint32_t min_count;
};
|
||||
|
||||
/// \brief Control Quantile kernel behavior
|
||||
///
|
||||
/// By default, returns the median value.
|
||||
/// \brief Control Quantile kernel behavior
///
/// By default, returns the median value.
class ARROW_EXPORT QuantileOptions : public FunctionOptions {
 public:
  /// Interpolation method to use when quantile lies between two data points
  enum Interpolation {
    LINEAR = 0,
    LOWER,
    HIGHER,
    NEAREST,
    MIDPOINT,
  };

  /// Single-quantile constructor (default: the median).
  explicit QuantileOptions(double q = 0.5, enum Interpolation interpolation = LINEAR,
                           bool skip_nulls = true, uint32_t min_count = 0);

  /// Multi-quantile constructor.
  explicit QuantileOptions(std::vector<double> q,
                           enum Interpolation interpolation = LINEAR,
                           bool skip_nulls = true, uint32_t min_count = 0);

  static constexpr char const kTypeName[] = "QuantileOptions";
  static QuantileOptions Defaults() { return QuantileOptions{}; }

  /// quantile must be between 0 and 1 inclusive
  std::vector<double> q;
  /// Interpolation used when a quantile falls between two data points.
  enum Interpolation interpolation;
  /// If true (the default), nulls are skipped; otherwise any null input
  /// causes a null output.
  bool skip_nulls;
  /// Emit null when fewer than this many non-null values are observed.
  uint32_t min_count;
};
|
||||
|
||||
/// \brief Control TDigest approximate quantile kernel behavior
|
||||
///
|
||||
/// By default, returns the median value.
|
||||
/// \brief Control TDigest approximate quantile kernel behavior
///
/// By default, returns the median value.
class ARROW_EXPORT TDigestOptions : public FunctionOptions {
 public:
  /// Single-quantile constructor (default: the median).
  explicit TDigestOptions(double q = 0.5, uint32_t delta = 100,
                          uint32_t buffer_size = 500, bool skip_nulls = true,
                          uint32_t min_count = 0);
  /// Multi-quantile constructor.
  explicit TDigestOptions(std::vector<double> q, uint32_t delta = 100,
                          uint32_t buffer_size = 500, bool skip_nulls = true,
                          uint32_t min_count = 0);
  static constexpr char const kTypeName[] = "TDigestOptions";
  static TDigestOptions Defaults() { return TDigestOptions{}; }

  /// quantile must be between 0 and 1 inclusive
  std::vector<double> q;
  /// compression parameter, default 100
  uint32_t delta;
  /// input buffer size, default 500
  uint32_t buffer_size;
  /// If true (the default), nulls are skipped; otherwise any null input
  /// causes a null output.
  bool skip_nulls;
  /// Emit null when fewer than this many non-null values are observed.
  uint32_t min_count;
};
|
||||
|
||||
/// \brief Control Index kernel behavior
|
||||
/// \brief Control Index kernel behavior
class ARROW_EXPORT IndexOptions : public FunctionOptions {
 public:
  explicit IndexOptions(std::shared_ptr<Scalar> value);
  // Default constructor for serialization
  IndexOptions();
  static constexpr char const kTypeName[] = "IndexOptions";

  /// The value whose first occurrence is searched for.
  std::shared_ptr<Scalar> value;
};
|
||||
|
||||
/// \brief Configure a grouped aggregation
|
||||
struct ARROW_EXPORT Aggregate {
|
||||
/// the name of the aggregation function
|
||||
std::string function;
|
||||
|
||||
/// options for the aggregation function
|
||||
std::shared_ptr<FunctionOptions> options;
|
||||
|
||||
// fields to which aggregations will be applied
|
||||
FieldRef target;
|
||||
|
||||
// output field name for aggregations
|
||||
std::string name;
|
||||
};
|
||||
|
||||
/// @}
|
||||
|
||||
/// \brief Count values in an array.
|
||||
///
|
||||
/// \param[in] options counting options, see CountOptions for more information
|
||||
/// \param[in] datum to count
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return out resulting datum
|
||||
///
|
||||
/// \since 1.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Count(const Datum& datum,
|
||||
const CountOptions& options = CountOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Compute the mean of a numeric array.
|
||||
///
|
||||
/// \param[in] value datum to compute the mean, expecting Array
|
||||
/// \param[in] options see ScalarAggregateOptions for more information
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return datum of the computed mean as a DoubleScalar
|
||||
///
|
||||
/// \since 1.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Mean(
|
||||
const Datum& value,
|
||||
const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Compute the product of values of a numeric array.
|
||||
///
|
||||
/// \param[in] value datum to compute product of, expecting Array or ChunkedArray
|
||||
/// \param[in] options see ScalarAggregateOptions for more information
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return datum of the computed sum as a Scalar
|
||||
///
|
||||
/// \since 6.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Product(
|
||||
const Datum& value,
|
||||
const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Sum values of a numeric array.
|
||||
///
|
||||
/// \param[in] value datum to sum, expecting Array or ChunkedArray
|
||||
/// \param[in] options see ScalarAggregateOptions for more information
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return datum of the computed sum as a Scalar
|
||||
///
|
||||
/// \since 1.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Sum(
|
||||
const Datum& value,
|
||||
const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Calculate the min / max of a numeric array
|
||||
///
|
||||
/// This function returns both the min and max as a struct scalar, with type
|
||||
/// struct<min: T, max: T>, where T is the input type
|
||||
///
|
||||
/// \param[in] value input datum, expecting Array or ChunkedArray
|
||||
/// \param[in] options see ScalarAggregateOptions for more information
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return resulting datum as a struct<min: T, max: T> scalar
|
||||
///
|
||||
/// \since 1.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> MinMax(
|
||||
const Datum& value,
|
||||
const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Test whether any element in a boolean array evaluates to true.
|
||||
///
|
||||
/// This function returns true if any of the elements in the array evaluates
|
||||
/// to true and false otherwise. Null values are ignored by default.
|
||||
/// If null values are taken into account by setting ScalarAggregateOptions
|
||||
/// parameter skip_nulls = false then Kleene logic is used.
|
||||
/// See KleeneOr for more details on Kleene logic.
|
||||
///
|
||||
/// \param[in] value input datum, expecting a boolean array
|
||||
/// \param[in] options see ScalarAggregateOptions for more information
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return resulting datum as a BooleanScalar
|
||||
///
|
||||
/// \since 3.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Any(
|
||||
const Datum& value,
|
||||
const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Test whether all elements in a boolean array evaluate to true.
|
||||
///
|
||||
/// This function returns true if all of the elements in the array evaluate
|
||||
/// to true and false otherwise. Null values are ignored by default.
|
||||
/// If null values are taken into account by setting ScalarAggregateOptions
|
||||
/// parameter skip_nulls = false then Kleene logic is used.
|
||||
/// See KleeneAnd for more details on Kleene logic.
|
||||
///
|
||||
/// \param[in] value input datum, expecting a boolean array
|
||||
/// \param[in] options see ScalarAggregateOptions for more information
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return resulting datum as a BooleanScalar
|
||||
|
||||
/// \since 3.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> All(
|
||||
const Datum& value,
|
||||
const ScalarAggregateOptions& options = ScalarAggregateOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Calculate the modal (most common) value of a numeric array
|
||||
///
|
||||
/// This function returns top-n most common values and number of times they occur as
|
||||
/// an array of `struct<mode: T, count: int64>`, where T is the input type.
|
||||
/// Values with larger counts are returned before smaller ones.
|
||||
/// If more than one value has the same count, the smaller value is returned first.
|
||||
///
|
||||
/// \param[in] value input datum, expecting Array or ChunkedArray
|
||||
/// \param[in] options see ModeOptions for more information
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return resulting datum as an array of struct<mode: T, count: int64>
|
||||
///
|
||||
/// \since 2.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Mode(const Datum& value,
|
||||
const ModeOptions& options = ModeOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Calculate the standard deviation of a numeric array
|
||||
///
|
||||
/// \param[in] value input datum, expecting Array or ChunkedArray
|
||||
/// \param[in] options see VarianceOptions for more information
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return datum of the computed standard deviation as a DoubleScalar
|
||||
///
|
||||
/// \since 2.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Stddev(const Datum& value,
|
||||
const VarianceOptions& options = VarianceOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Calculate the variance of a numeric array
|
||||
///
|
||||
/// \param[in] value input datum, expecting Array or ChunkedArray
|
||||
/// \param[in] options see VarianceOptions for more information
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return datum of the computed variance as a DoubleScalar
|
||||
///
|
||||
/// \since 2.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Variance(const Datum& value,
|
||||
const VarianceOptions& options = VarianceOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Calculate the quantiles of a numeric array
|
||||
///
|
||||
/// \param[in] value input datum, expecting Array or ChunkedArray
|
||||
/// \param[in] options see QuantileOptions for more information
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return resulting datum as an array
|
||||
///
|
||||
/// \since 4.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Quantile(const Datum& value,
|
||||
const QuantileOptions& options = QuantileOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Calculate the approximate quantiles of a numeric array with T-Digest algorithm
|
||||
///
|
||||
/// \param[in] value input datum, expecting Array or ChunkedArray
|
||||
/// \param[in] options see TDigestOptions for more information
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return resulting datum as an array
|
||||
///
|
||||
/// \since 4.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> TDigest(const Datum& value,
|
||||
const TDigestOptions& options = TDigestOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Find the first index of a value in an array.
|
||||
///
|
||||
/// \param[in] value The array to search.
|
||||
/// \param[in] options the value to search for. See IndexOptions.
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return out a Scalar containing the index (or -1 if not found).
|
||||
///
|
||||
/// \since 5.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Index(const Datum& value, const IndexOptions& options,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,598 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
#include "arrow/compute/function.h"
|
||||
#include "arrow/datum.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/type_fwd.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
class ExecContext;
|
||||
|
||||
/// \addtogroup compute-concrete-options
|
||||
/// @{
|
||||
|
||||
/// \brief Options controlling how Filter treats nulls in the selection mask
class ARROW_EXPORT FilterOptions : public FunctionOptions {
 public:
  /// Configure the action taken when a slot of the selection mask is null
  enum NullSelectionBehavior {
    /// The corresponding filtered value will be removed in the output.
    DROP,
    /// The corresponding filtered value will be null in the output.
    EMIT_NULL,
  };

  explicit FilterOptions(NullSelectionBehavior null_selection = DROP);
  static constexpr char const kTypeName[] = "FilterOptions";
  static FilterOptions Defaults() { return FilterOptions(); }

  /// How null slots in the mask are handled (default: DROP).
  NullSelectionBehavior null_selection_behavior = DROP;
};
|
||||
|
||||
/// \brief Options for the Take function (index bounds checking)
class ARROW_EXPORT TakeOptions : public FunctionOptions {
 public:
  explicit TakeOptions(bool boundscheck = true);
  static constexpr char const kTypeName[] = "TakeOptions";
  static TakeOptions BoundsCheck() { return TakeOptions(true); }
  static TakeOptions NoBoundsCheck() { return TakeOptions(false); }
  static TakeOptions Defaults() { return BoundsCheck(); }

  /// Whether indices are validated against the input length (default true).
  bool boundscheck = true;
};
|
||||
|
||||
/// \brief Options for the dictionary encode function
|
||||
/// \brief Options for the dictionary encode function
class ARROW_EXPORT DictionaryEncodeOptions : public FunctionOptions {
 public:
  /// Configure how null values will be encoded
  enum NullEncodingBehavior {
    /// The null value will be added to the dictionary with a proper index.
    ENCODE,
    /// The null value will be masked in the indices array.
    MASK
  };

  explicit DictionaryEncodeOptions(NullEncodingBehavior null_encoding = MASK);
  static constexpr char const kTypeName[] = "DictionaryEncodeOptions";
  static DictionaryEncodeOptions Defaults() { return DictionaryEncodeOptions(); }

  /// How nulls are represented in the encoding (default: MASK).
  NullEncodingBehavior null_encoding_behavior = MASK;
};
|
||||
|
||||
/// \brief Direction in which values are ordered by a sort key
enum class SortOrder {
  /// Arrange values in increasing order
  Ascending,
  /// Arrange values in decreasing order
  Descending,
};
|
||||
|
||||
/// \brief Where nulls and NaNs are placed relative to non-null values
enum class NullPlacement {
  /// Place nulls and NaNs before any non-null values.
  /// NaNs will come after nulls.
  AtStart,
  /// Place nulls and NaNs after any non-null values.
  /// NaNs will come before nulls.
  AtEnd,
};
|
||||
|
||||
/// \brief One sort key for PartitionNthIndices (TODO) and SortIndices
|
||||
/// \brief One sort key for PartitionNthIndices (TODO) and SortIndices
class ARROW_EXPORT SortKey : public util::EqualityComparable<SortKey> {
 public:
  explicit SortKey(FieldRef target, SortOrder order = SortOrder::Ascending)
      : target(std::move(target)), order(order) {}

  using util::EqualityComparable<SortKey>::Equals;
  using util::EqualityComparable<SortKey>::operator==;
  using util::EqualityComparable<SortKey>::operator!=;
  bool Equals(const SortKey& other) const;
  std::string ToString() const;

  /// A FieldRef targeting the sort column.
  FieldRef target;
  /// How to order by this sort key.
  SortOrder order;
};
|
||||
|
||||
/// \brief Options for sorting a single array
class ARROW_EXPORT ArraySortOptions : public FunctionOptions {
 public:
  explicit ArraySortOptions(SortOrder order = SortOrder::Ascending,
                            NullPlacement null_placement = NullPlacement::AtEnd);
  static constexpr char const kTypeName[] = "ArraySortOptions";
  static ArraySortOptions Defaults() { return ArraySortOptions(); }

  /// Sorting order
  SortOrder order;
  /// Whether nulls and NaNs are placed at the start or at the end
  NullPlacement null_placement;
};
|
||||
|
||||
/// \brief Options for multi-key sorting (tables, record batches, chunked arrays)
class ARROW_EXPORT SortOptions : public FunctionOptions {
 public:
  explicit SortOptions(std::vector<SortKey> sort_keys = {},
                       NullPlacement null_placement = NullPlacement::AtEnd);
  static constexpr char const kTypeName[] = "SortOptions";
  static SortOptions Defaults() { return SortOptions(); }

  /// Column key(s) to order by and how to order by these sort keys.
  std::vector<SortKey> sort_keys;
  /// Whether nulls and NaNs are placed at the start or at the end
  NullPlacement null_placement;
};
|
||||
|
||||
/// \brief SelectK options
|
||||
/// \brief SelectK options
///
/// Selects the first `k` elements of the input according to `sort_keys`.
class ARROW_EXPORT SelectKOptions : public FunctionOptions {
 public:
  explicit SelectKOptions(int64_t k = -1, std::vector<SortKey> sort_keys = {});
  static constexpr char const kTypeName[] = "SelectKOptions";
  static SelectKOptions Defaults() { return SelectKOptions(); }

  /// \brief Create options selecting the `k` largest values (descending keys).
  ///
  /// If `key_names` is empty, a single placeholder key is used so the options
  /// are still valid for keyless (array) inputs.
  static SelectKOptions TopKDefault(int64_t k, std::vector<std::string> key_names = {}) {
    std::vector<SortKey> keys;
    keys.reserve(key_names.size());
    for (const auto& name : key_names) {
      // Construct the SortKey in place rather than moving a temporary.
      keys.emplace_back(name, SortOrder::Descending);
    }
    if (key_names.empty()) {
      keys.emplace_back("not-used", SortOrder::Descending);
    }
    // Move the key vector into the options instead of copying it.
    return SelectKOptions{k, std::move(keys)};
  }

  /// \brief Create options selecting the `k` smallest values (ascending keys).
  ///
  /// If `key_names` is empty, a single placeholder key is used so the options
  /// are still valid for keyless (array) inputs.
  static SelectKOptions BottomKDefault(int64_t k,
                                       std::vector<std::string> key_names = {}) {
    std::vector<SortKey> keys;
    keys.reserve(key_names.size());
    for (const auto& name : key_names) {
      // Construct the SortKey in place rather than moving a temporary.
      keys.emplace_back(name, SortOrder::Ascending);
    }
    if (key_names.empty()) {
      keys.emplace_back("not-used", SortOrder::Ascending);
    }
    // Move the key vector into the options instead of copying it.
    return SelectKOptions{k, std::move(keys)};
  }

  /// The number of `k` elements to keep.
  int64_t k;
  /// Column key(s) to order by and how to order by these sort keys.
  std::vector<SortKey> sort_keys;
};
|
||||
|
||||
/// \brief Rank options
|
||||
/// \brief Rank options
class ARROW_EXPORT RankOptions : public FunctionOptions {
 public:
  /// Configure how ties between equal values are handled
  enum Tiebreaker {
    /// Ties get the smallest possible rank in sorted order.
    Min,
    /// Ties get the largest possible rank in sorted order.
    Max,
    /// Ranks are assigned in order of when ties appear in the input.
    /// This ensures the ranks are a stable permutation of the input.
    First,
    /// The ranks span a dense [1, M] interval where M is the number
    /// of distinct values in the input.
    Dense
  };

  explicit RankOptions(std::vector<SortKey> sort_keys = {},
                       NullPlacement null_placement = NullPlacement::AtEnd,
                       Tiebreaker tiebreaker = RankOptions::First);
  /// Convenience constructor for array inputs
  explicit RankOptions(SortOrder order,
                       NullPlacement null_placement = NullPlacement::AtEnd,
                       Tiebreaker tiebreaker = RankOptions::First)
      : RankOptions({SortKey("", order)}, null_placement, tiebreaker) {}

  static constexpr char const kTypeName[] = "RankOptions";
  static RankOptions Defaults() { return RankOptions(); }

  /// Column key(s) to order by and how to order by these sort keys.
  std::vector<SortKey> sort_keys;
  /// Whether nulls and NaNs are placed at the start or at the end
  NullPlacement null_placement;
  /// Tiebreaker for dealing with equal values in ranks
  Tiebreaker tiebreaker;
};
|
||||
|
||||
/// \brief Partitioning options for NthToIndices
|
||||
/// \brief Partitioning options for NthToIndices
class ARROW_EXPORT PartitionNthOptions : public FunctionOptions {
 public:
  explicit PartitionNthOptions(int64_t pivot,
                               NullPlacement null_placement = NullPlacement::AtEnd);
  // Default constructor delegates with pivot 0 (for serialization).
  PartitionNthOptions() : PartitionNthOptions(0) {}
  static constexpr char const kTypeName[] = "PartitionNthOptions";

  /// The index into the equivalent sorted array of the partition pivot element.
  int64_t pivot;
  /// Whether nulls and NaNs are partitioned at the start or at the end
  NullPlacement null_placement;
};
|
||||
|
||||
/// \brief Options for cumulative sum function
|
||||
/// \brief Options for cumulative sum function
class ARROW_EXPORT CumulativeSumOptions : public FunctionOptions {
 public:
  explicit CumulativeSumOptions(double start = 0, bool skip_nulls = false,
                                bool check_overflow = false);
  explicit CumulativeSumOptions(std::shared_ptr<Scalar> start, bool skip_nulls = false,
                                bool check_overflow = false);
  static constexpr char const kTypeName[] = "CumulativeSumOptions";
  static CumulativeSumOptions Defaults() { return CumulativeSumOptions(); }

  /// Optional starting value for cumulative operation computation
  std::shared_ptr<Scalar> start;

  /// If true, nulls in the input are ignored and produce a corresponding null output.
  /// When false, the first null encountered is propagated through the remaining output.
  bool skip_nulls = false;

  /// When true, returns an Invalid Status when overflow is detected
  bool check_overflow = false;
};
|
||||
|
||||
/// @}
|
||||
|
||||
/// \brief Filter with a boolean selection filter
|
||||
///
|
||||
/// The output will be populated with values from the input at positions
|
||||
/// where the selection filter is not 0. Nulls in the filter will be handled
|
||||
/// based on options.null_selection_behavior.
|
||||
///
|
||||
/// For example given values = ["a", "b", "c", null, "e", "f"] and
|
||||
/// filter = [0, 1, 1, 0, null, 1], the output will be
|
||||
/// (null_selection_behavior == DROP) = ["b", "c", "f"]
|
||||
/// (null_selection_behavior == EMIT_NULL) = ["b", "c", null, "f"]
|
||||
///
|
||||
/// \param[in] values array to filter
|
||||
/// \param[in] filter indicates which values should be filtered out
|
||||
/// \param[in] options configures null_selection_behavior
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return the resulting datum
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Filter(const Datum& values, const Datum& filter,
|
||||
const FilterOptions& options = FilterOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
namespace internal {
|
||||
|
||||
// These internal functions are implemented in kernels/vector_selection.cc
|
||||
|
||||
/// \brief Return the number of selected indices in the boolean filter
|
||||
ARROW_EXPORT
|
||||
int64_t GetFilterOutputSize(const ArraySpan& filter,
|
||||
FilterOptions::NullSelectionBehavior null_selection);
|
||||
|
||||
/// \brief Compute uint64 selection indices for use with Take given a boolean
|
||||
/// filter
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<ArrayData>> GetTakeIndices(
|
||||
const ArraySpan& filter, FilterOptions::NullSelectionBehavior null_selection,
|
||||
MemoryPool* memory_pool = default_memory_pool());
|
||||
|
||||
} // namespace internal
|
||||
|
||||
/// \brief ReplaceWithMask replaces each value in the array corresponding
|
||||
/// to a true value in the mask with the next element from `replacements`.
|
||||
///
|
||||
/// \param[in] values Array input to replace
|
||||
/// \param[in] mask Array or Scalar of Boolean mask values
|
||||
/// \param[in] replacements The replacement values to draw from. There must
|
||||
/// be as many replacement values as true values in the mask.
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
///
|
||||
/// \return the resulting datum
|
||||
///
|
||||
/// \since 5.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> ReplaceWithMask(const Datum& values, const Datum& mask,
|
||||
const Datum& replacements, ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief FillNullForward fill null values in forward direction
|
||||
///
|
||||
/// The output array will be of the same type as the input values
|
||||
/// array, with replaced null values in forward direction.
|
||||
///
|
||||
/// For example given values = ["a", "b", "c", null, null, "f"],
|
||||
/// the output will be = ["a", "b", "c", "c", "c", "f"]
|
||||
///
|
||||
/// \param[in] values datum from which to take
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return the resulting datum
|
||||
ARROW_EXPORT
|
||||
Result<Datum> FillNullForward(const Datum& values, ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief FillNullBackward fill null values in backward direction
|
||||
///
|
||||
/// The output array will be of the same type as the input values
|
||||
/// array, with replaced null values in backward direction.
|
||||
///
|
||||
/// For example given values = ["a", "b", "c", null, null, "f"],
|
||||
/// the output will be = ["a", "b", "c", "f", "f", "f"]
|
||||
///
|
||||
/// \param[in] values datum from which to take
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return the resulting datum
|
||||
ARROW_EXPORT
|
||||
Result<Datum> FillNullBackward(const Datum& values, ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Take from an array of values at indices in another array
|
||||
///
|
||||
/// The output array will be of the same type as the input values
|
||||
/// array, with elements taken from the values array at the given
|
||||
/// indices. If an index is null then the taken element will be null.
|
||||
///
|
||||
/// For example given values = ["a", "b", "c", null, "e", "f"] and
|
||||
/// indices = [2, 1, null, 3], the output will be
|
||||
/// = [values[2], values[1], null, values[3]]
|
||||
/// = ["c", "b", null, null]
|
||||
///
|
||||
/// \param[in] values datum from which to take
|
||||
/// \param[in] indices which values to take
|
||||
/// \param[in] options options
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return the resulting datum
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Take(const Datum& values, const Datum& indices,
|
||||
const TakeOptions& options = TakeOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Take with Array inputs and output
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> Take(const Array& values, const Array& indices,
|
||||
const TakeOptions& options = TakeOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Drop Null from an array of values
|
||||
///
|
||||
/// The output array will be of the same type as the input values
|
||||
/// array, with elements taken from the values array without nulls.
|
||||
///
|
||||
/// For example given values = ["a", "b", "c", null, "e", "f"],
|
||||
/// the output will be = ["a", "b", "c", "e", "f"]
|
||||
///
|
||||
/// \param[in] values datum from which to take
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return the resulting datum
|
||||
ARROW_EXPORT
|
||||
Result<Datum> DropNull(const Datum& values, ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief DropNull with Array inputs and output
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> DropNull(const Array& values, ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Return indices that partition an array around n-th sorted element.
|
||||
///
|
||||
/// Find index of n-th(0 based) smallest value and perform indirect
|
||||
/// partition of an array around that element. Output indices[0 ~ n-1]
|
||||
/// holds values no greater than n-th element, and indices[n+1 ~ end]
|
||||
/// holds values no less than n-th element. Elements in each partition
|
||||
/// is not sorted. Nulls will be partitioned to the end of the output.
|
||||
/// Output is not guaranteed to be stable.
|
||||
///
|
||||
/// \param[in] values array to be partitioned
|
||||
/// \param[in] n pivot array around sorted n-th element
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return offsets indices that would partition an array
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> NthToIndices(const Array& values, int64_t n,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Return indices that partition an array around n-th sorted element.
|
||||
///
|
||||
/// This overload takes a PartitionNthOptions specifiying the pivot index
|
||||
/// and the null handling.
|
||||
///
|
||||
/// \param[in] values array to be partitioned
|
||||
/// \param[in] options options including pivot index and null handling
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return offsets indices that would partition an array
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> NthToIndices(const Array& values,
|
||||
const PartitionNthOptions& options,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Return indices that would select the first `k` elements.
|
||||
///
|
||||
/// Perform an indirect sort of the datum, keeping only the first `k` elements. The output
|
||||
/// array will contain indices such that the item indicated by the k-th index will be in
|
||||
/// the position it would be if the datum were sorted by `options.sort_keys`. However,
|
||||
/// indices of null values will not be part of the output. The sort is not guaranteed to
|
||||
/// be stable.
|
||||
///
|
||||
/// \param[in] datum datum to be partitioned
|
||||
/// \param[in] options options
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return a datum with the same schema as the input
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> SelectKUnstable(const Datum& datum,
|
||||
const SelectKOptions& options,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Return the indices that would sort an array.
|
||||
///
|
||||
/// Perform an indirect sort of array. The output array will contain
|
||||
/// indices that would sort an array, which would be the same length
|
||||
/// as input. Nulls will be stably partitioned to the end of the output
|
||||
/// regardless of order.
|
||||
///
|
||||
/// For example given array = [null, 1, 3.3, null, 2, 5.3] and order
|
||||
/// = SortOrder::DESCENDING, the output will be [5, 2, 4, 1, 0,
|
||||
/// 3].
|
||||
///
|
||||
/// \param[in] array array to sort
|
||||
/// \param[in] order ascending or descending
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return offsets indices that would sort an array
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> SortIndices(const Array& array,
|
||||
SortOrder order = SortOrder::Ascending,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Return the indices that would sort an array.
|
||||
///
|
||||
/// This overload takes a ArraySortOptions specifiying the sort order
|
||||
/// and the null handling.
|
||||
///
|
||||
/// \param[in] array array to sort
|
||||
/// \param[in] options options including sort order and null handling
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return offsets indices that would sort an array
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> SortIndices(const Array& array,
|
||||
const ArraySortOptions& options,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Return the indices that would sort a chunked array.
|
||||
///
|
||||
/// Perform an indirect sort of chunked array. The output array will
|
||||
/// contain indices that would sort a chunked array, which would be
|
||||
/// the same length as input. Nulls will be stably partitioned to the
|
||||
/// end of the output regardless of order.
|
||||
///
|
||||
/// For example given chunked_array = [[null, 1], [3.3], [null, 2,
|
||||
/// 5.3]] and order = SortOrder::DESCENDING, the output will be [5, 2,
|
||||
/// 4, 1, 0, 3].
|
||||
///
|
||||
/// \param[in] chunked_array chunked array to sort
|
||||
/// \param[in] order ascending or descending
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return offsets indices that would sort an array
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> SortIndices(const ChunkedArray& chunked_array,
|
||||
SortOrder order = SortOrder::Ascending,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Return the indices that would sort a chunked array.
|
||||
///
|
||||
/// This overload takes a ArraySortOptions specifiying the sort order
|
||||
/// and the null handling.
|
||||
///
|
||||
/// \param[in] chunked_array chunked array to sort
|
||||
/// \param[in] options options including sort order and null handling
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return offsets indices that would sort an array
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> SortIndices(const ChunkedArray& chunked_array,
|
||||
const ArraySortOptions& options,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Return the indices that would sort an input in the
|
||||
/// specified order. Input is one of array, chunked array record batch
|
||||
/// or table.
|
||||
///
|
||||
/// Perform an indirect sort of input. The output array will contain
|
||||
/// indices that would sort an input, which would be the same length
|
||||
/// as input. Nulls will be stably partitioned to the start or to the end
|
||||
/// of the output depending on SortOrder::null_placement.
|
||||
///
|
||||
/// For example given input (table) = {
|
||||
/// "column1": [[null, 1], [ 3, null, 2, 1]],
|
||||
/// "column2": [[ 5], [3, null, null, 5, 5]],
|
||||
/// } and options = {
|
||||
/// {"column1", SortOrder::Ascending},
|
||||
/// {"column2", SortOrder::Descending},
|
||||
/// }, the output will be [5, 1, 4, 2, 0, 3].
|
||||
///
|
||||
/// \param[in] datum array, chunked array, record batch or table to sort
|
||||
/// \param[in] options options
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return offsets indices that would sort a table
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> SortIndices(const Datum& datum, const SortOptions& options,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Compute unique elements from an array-like object
|
||||
///
|
||||
/// Note if a null occurs in the input it will NOT be included in the output.
|
||||
///
|
||||
/// \param[in] datum array-like input
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return result as Array
|
||||
///
|
||||
/// \since 1.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> Unique(const Datum& datum, ExecContext* ctx = NULLPTR);
|
||||
|
||||
// Constants for accessing the output of ValueCounts
|
||||
ARROW_EXPORT extern const char kValuesFieldName[];
|
||||
ARROW_EXPORT extern const char kCountsFieldName[];
|
||||
ARROW_EXPORT extern const int32_t kValuesFieldIndex;
|
||||
ARROW_EXPORT extern const int32_t kCountsFieldIndex;
|
||||
|
||||
/// \brief Return counts of unique elements from an array-like object.
|
||||
///
|
||||
/// Note that the counts do not include counts for nulls in the array. These can be
|
||||
/// obtained separately from metadata.
|
||||
///
|
||||
/// For floating point arrays there is no attempt to normalize -0.0, 0.0 and NaN values
|
||||
/// which can lead to unexpected results if the input Array has these values.
|
||||
///
|
||||
/// \param[in] value array-like input
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return counts An array of <input type "Values", int64_t "Counts"> structs.
|
||||
///
|
||||
/// \since 1.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<StructArray>> ValueCounts(const Datum& value,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Dictionary-encode values in an array-like object
|
||||
///
|
||||
/// Any nulls encountered in the dictionary will be handled according to the
|
||||
/// specified null encoding behavior.
|
||||
///
|
||||
/// For example, given values ["a", "b", null, "a", null] the output will be
|
||||
/// (null_encoding == ENCODE) Indices: [0, 1, 2, 0, 2] / Dict: ["a", "b", null]
|
||||
/// (null_encoding == MASK) Indices: [0, 1, null, 0, null] / Dict: ["a", "b"]
|
||||
///
|
||||
/// If the input is already dictionary encoded this function is a no-op unless
|
||||
/// it needs to modify the null_encoding (TODO)
|
||||
///
|
||||
/// \param[in] data array-like input
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \param[in] options configures null encoding behavior
|
||||
/// \return result with same shape and type as input
|
||||
///
|
||||
/// \since 1.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> DictionaryEncode(
|
||||
const Datum& data,
|
||||
const DictionaryEncodeOptions& options = DictionaryEncodeOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
ARROW_EXPORT
|
||||
Result<Datum> CumulativeSum(
|
||||
const Datum& values,
|
||||
const CumulativeSumOptions& options = CumulativeSumOptions::Defaults(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
// Deprecated functions
|
||||
|
||||
ARROW_DEPRECATED("Deprecated in 3.0.0. Use SortIndices()")
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> SortToIndices(const Array& values,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,124 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/function.h"
|
||||
#include "arrow/compute/type_fwd.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/status.h"
|
||||
#include "arrow/type.h"
|
||||
#include "arrow/util/macros.h"
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {
|
||||
|
||||
class Array;
|
||||
|
||||
namespace compute {
|
||||
|
||||
class ExecContext;
|
||||
|
||||
/// \addtogroup compute-concrete-options
|
||||
/// @{
|
||||
|
||||
/// \brief Options controlling the behavior of compute::Cast
class ARROW_EXPORT CastOptions : public FunctionOptions {
 public:
  explicit CastOptions(bool safe = true);

  static constexpr char const kTypeName[] = "CastOptions";

  /// \brief Create options configured for checked ("safe") casting to `to_type`
  static CastOptions Safe(TypeHolder to_type = {}) {
    CastOptions options(true);
    options.to_type = std::move(to_type);
    return options;
  }

  /// \brief Create options configured for unchecked ("unsafe") casting to `to_type`
  static CastOptions Unsafe(TypeHolder to_type = {}) {
    CastOptions options(false);
    options.to_type = std::move(to_type);
    return options;
  }

  /// Type being casted to. May be passed separately to the eager function
  /// compute::Cast
  TypeHolder to_type;

  bool allow_int_overflow;
  bool allow_time_truncate;
  bool allow_time_overflow;
  bool allow_decimal_truncate;
  bool allow_float_truncate;
  /// Indicate if conversions from Binary/FixedSizeBinary to string must
  /// validate the utf8 payload.
  bool allow_invalid_utf8;
};
|
||||
|
||||
/// @}
|
||||
|
||||
/// \brief Return true if a cast function is defined
|
||||
ARROW_EXPORT
|
||||
bool CanCast(const DataType& from_type, const DataType& to_type);
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
// Convenience invocation APIs for a number of kernels
|
||||
|
||||
/// \brief Cast from one array type to another
|
||||
/// \param[in] value array to cast
|
||||
/// \param[in] to_type type to cast to
|
||||
/// \param[in] options casting options
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return the resulting array
|
||||
///
|
||||
/// \since 1.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Array>> Cast(const Array& value, const TypeHolder& to_type,
|
||||
const CastOptions& options = CastOptions::Safe(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Cast from one array type to another
|
||||
/// \param[in] value array to cast
|
||||
/// \param[in] options casting options. The "to_type" field must be populated
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return the resulting array
|
||||
///
|
||||
/// \since 1.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Cast(const Datum& value, const CastOptions& options,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Cast from one value to another
|
||||
/// \param[in] value datum to cast
|
||||
/// \param[in] to_type type to cast to
|
||||
/// \param[in] options casting options
|
||||
/// \param[in] ctx the function execution context, optional
|
||||
/// \return the resulting datum
|
||||
///
|
||||
/// \since 1.0.0
|
||||
/// \note API not yet finalized
|
||||
ARROW_EXPORT
|
||||
Result<Datum> Cast(const Datum& value, const TypeHolder& to_type,
|
||||
const CastOptions& options = CastOptions::Safe(),
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,460 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// NOTE: API is EXPERIMENTAL and will change without going through a
|
||||
// deprecation cycle
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <cstdint>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/array/data.h"
|
||||
#include "arrow/compute/exec/expression.h"
|
||||
#include "arrow/compute/type_fwd.h"
|
||||
#include "arrow/datum.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/type_fwd.h"
|
||||
#include "arrow/util/macros.h"
|
||||
#include "arrow/util/type_fwd.h"
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
// It seems like 64K might be a good default chunksize to use for execution
// based on the experience of other query processing systems. The current
// default is not to chunk contiguous arrays, though, but this may change in
// the future once parallel execution is implemented
//
// Declared `inline` (C++17) so that every translation unit including this
// header shares a single constant, rather than each receiving its own
// internal-linkage copy as with a plain `static constexpr`.
inline constexpr int64_t kDefaultExecChunksize = UINT16_MAX;
|
||||
|
||||
/// \brief Context for expression-global variables and options used by
|
||||
/// function evaluation
|
||||
class ARROW_EXPORT ExecContext {
|
||||
public:
|
||||
// If no function registry passed, the default is used.
|
||||
explicit ExecContext(MemoryPool* pool = default_memory_pool(),
|
||||
::arrow::internal::Executor* executor = NULLPTR,
|
||||
FunctionRegistry* func_registry = NULLPTR);
|
||||
|
||||
/// \brief The MemoryPool used for allocations, default is
|
||||
/// default_memory_pool().
|
||||
MemoryPool* memory_pool() const { return pool_; }
|
||||
|
||||
const ::arrow::internal::CpuInfo* cpu_info() const;
|
||||
|
||||
/// \brief An Executor which may be used to parallelize execution.
|
||||
::arrow::internal::Executor* executor() const { return executor_; }
|
||||
|
||||
/// \brief The FunctionRegistry for looking up functions by name and
|
||||
/// selecting kernels for execution. Defaults to the library-global function
|
||||
/// registry provided by GetFunctionRegistry.
|
||||
FunctionRegistry* func_registry() const { return func_registry_; }
|
||||
|
||||
// \brief Set maximum length unit of work for kernel execution. Larger
|
||||
// contiguous array inputs will be split into smaller chunks, and, if
|
||||
// possible and enabled, processed in parallel. The default chunksize is
|
||||
// INT64_MAX, so contiguous arrays are not split.
|
||||
void set_exec_chunksize(int64_t chunksize) { exec_chunksize_ = chunksize; }
|
||||
|
||||
// \brief Maximum length for ExecBatch data chunks processed by
|
||||
// kernels. Contiguous array inputs with longer length will be split into
|
||||
// smaller chunks.
|
||||
int64_t exec_chunksize() const { return exec_chunksize_; }
|
||||
|
||||
/// \brief Set whether to use multiple threads for function execution. This
|
||||
/// is not yet used.
|
||||
void set_use_threads(bool use_threads = true) { use_threads_ = use_threads; }
|
||||
|
||||
/// \brief If true, then utilize multiple threads where relevant for function
|
||||
/// execution. This is not yet used.
|
||||
bool use_threads() const { return use_threads_; }
|
||||
|
||||
// Set the preallocation strategy for kernel execution as it relates to
|
||||
// chunked execution. For chunked execution, whether via ChunkedArray inputs
|
||||
// or splitting larger Array arguments into smaller pieces, contiguous
|
||||
// allocation (if permitted by the kernel) will allocate one large array to
|
||||
// write output into yielding it to the caller at the end. If this option is
|
||||
// set to off, then preallocations will be performed independently for each
|
||||
// chunk of execution
|
||||
//
|
||||
// TODO: At some point we might want the limit the size of contiguous
|
||||
// preallocations. For example, even if the exec_chunksize is 64K or less, we
|
||||
// might limit contiguous allocations to 1M records, say.
|
||||
void set_preallocate_contiguous(bool preallocate) {
|
||||
preallocate_contiguous_ = preallocate;
|
||||
}
|
||||
|
||||
/// \brief If contiguous preallocations should be used when doing chunked
|
||||
/// execution as specified by exec_chunksize(). See
|
||||
/// set_preallocate_contiguous() for more information.
|
||||
bool preallocate_contiguous() const { return preallocate_contiguous_; }
|
||||
|
||||
private:
|
||||
MemoryPool* pool_;
|
||||
::arrow::internal::Executor* executor_;
|
||||
FunctionRegistry* func_registry_;
|
||||
int64_t exec_chunksize_ = std::numeric_limits<int64_t>::max();
|
||||
bool preallocate_contiguous_ = true;
|
||||
bool use_threads_ = true;
|
||||
};
|
||||
|
||||
// TODO: Consider standardizing on uint16 selection vectors and only use them
|
||||
// when we can ensure that each value is 64K length or smaller
|
||||
|
||||
/// \brief Container for an array of value selection indices that were
|
||||
/// materialized from a filter.
|
||||
///
|
||||
/// Columnar query engines (see e.g. [1]) have found that rather than
|
||||
/// materializing filtered data, the filter can instead be converted to an
|
||||
/// array of the "on" indices and then "fusing" these indices in operator
|
||||
/// implementations. This is especially relevant for aggregations but also
|
||||
/// applies to scalar operations.
|
||||
///
|
||||
/// We are not yet using this so this is mostly a placeholder for now.
|
||||
///
|
||||
/// [1]: http://cidrdb.org/cidr2005/papers/P19.pdf
|
||||
class ARROW_EXPORT SelectionVector {
|
||||
public:
|
||||
explicit SelectionVector(std::shared_ptr<ArrayData> data);
|
||||
|
||||
explicit SelectionVector(const Array& arr);
|
||||
|
||||
/// \brief Create SelectionVector from boolean mask
|
||||
static Result<std::shared_ptr<SelectionVector>> FromMask(const BooleanArray& arr);
|
||||
|
||||
const int32_t* indices() const { return indices_; }
|
||||
int32_t length() const;
|
||||
|
||||
private:
|
||||
std::shared_ptr<ArrayData> data_;
|
||||
const int32_t* indices_;
|
||||
};
|
||||
|
||||
/// \brief A unit of work for kernel execution. It contains a collection of
|
||||
/// Array and Scalar values and an optional SelectionVector indicating that
|
||||
/// there is an unmaterialized filter that either must be materialized, or (if
|
||||
/// the kernel supports it) pushed down into the kernel implementation.
|
||||
///
|
||||
/// ExecBatch is semantically similar to RecordBatch in that in a SQL context
|
||||
/// it represents a collection of records, but constant "columns" are
|
||||
/// represented by Scalar values rather than having to be converted into arrays
|
||||
/// with repeated values.
|
||||
///
|
||||
/// TODO: Datum uses arrow/util/variant.h which may be a bit heavier-weight
|
||||
/// than is desirable for this class. Microbenchmarks would help determine for
|
||||
/// sure. See ARROW-8928.
|
||||
|
||||
/// \addtogroup execnode-components
|
||||
/// @{
|
||||
|
||||
struct ARROW_EXPORT ExecBatch {
|
||||
ExecBatch() = default;
|
||||
ExecBatch(std::vector<Datum> values, int64_t length)
|
||||
: values(std::move(values)), length(length) {}
|
||||
|
||||
explicit ExecBatch(const RecordBatch& batch);
|
||||
|
||||
static Result<ExecBatch> Make(std::vector<Datum> values);
|
||||
|
||||
Result<std::shared_ptr<RecordBatch>> ToRecordBatch(
|
||||
std::shared_ptr<Schema> schema, MemoryPool* pool = default_memory_pool()) const;
|
||||
|
||||
/// The values representing positional arguments to be passed to a kernel's
|
||||
/// exec function for processing.
|
||||
std::vector<Datum> values;
|
||||
|
||||
/// A deferred filter represented as an array of indices into the values.
|
||||
///
|
||||
/// For example, the filter [true, true, false, true] would be represented as
|
||||
/// the selection vector [0, 1, 3]. When the selection vector is set,
|
||||
/// ExecBatch::length is equal to the length of this array.
|
||||
std::shared_ptr<SelectionVector> selection_vector;
|
||||
|
||||
/// A predicate Expression guaranteed to evaluate to true for all rows in this batch.
|
||||
Expression guarantee = literal(true);
|
||||
|
||||
/// The semantic length of the ExecBatch. When the values are all scalars,
|
||||
/// the length should be set to 1 for non-aggregate kernels, otherwise the
|
||||
/// length is taken from the array values, except when there is a selection
|
||||
/// vector. When there is a selection vector set, the length of the batch is
|
||||
/// the length of the selection. Aggregate kernels can have an ExecBatch
|
||||
/// formed by projecting just the partition columns from a batch in which
|
||||
/// case, it would have scalar rows with length greater than 1.
|
||||
///
|
||||
/// If the array values are of length 0 then the length is 0 regardless of
|
||||
/// whether any values are Scalar.
|
||||
int64_t length = 0;
|
||||
|
||||
/// \brief The sum of bytes in each buffer referenced by the batch
|
||||
///
|
||||
/// Note: Scalars are not counted
|
||||
/// Note: Some values may referenced only part of a buffer, for
|
||||
/// example, an array with an offset. The actual data
|
||||
/// visible to this batch will be smaller than the total
|
||||
/// buffer size in this case.
|
||||
int64_t TotalBufferSize() const;
|
||||
|
||||
/// \brief Return the value at the i-th index
|
||||
template <typename index_type>
|
||||
inline const Datum& operator[](index_type i) const {
|
||||
return values[i];
|
||||
}
|
||||
|
||||
bool Equals(const ExecBatch& other) const;
|
||||
|
||||
/// \brief A convenience for the number of values / arguments.
|
||||
int num_values() const { return static_cast<int>(values.size()); }
|
||||
|
||||
ExecBatch Slice(int64_t offset, int64_t length) const;
|
||||
|
||||
/// \brief A convenience for returning the types from the batch.
|
||||
std::vector<TypeHolder> GetTypes() const {
|
||||
std::vector<TypeHolder> result;
|
||||
for (const auto& value : this->values) {
|
||||
result.emplace_back(value.type());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
std::string ToString() const;
|
||||
};
|
||||
|
||||
inline bool operator==(const ExecBatch& l, const ExecBatch& r) { return l.Equals(r); }
|
||||
inline bool operator!=(const ExecBatch& l, const ExecBatch& r) { return !l.Equals(r); }
|
||||
|
||||
ARROW_EXPORT void PrintTo(const ExecBatch&, std::ostream*);
|
||||
|
||||
struct ExecValue {
|
||||
ArraySpan array = {};
|
||||
const Scalar* scalar = NULLPTR;
|
||||
|
||||
ExecValue(Scalar* scalar) // NOLINT implicit conversion
|
||||
: scalar(scalar) {}
|
||||
|
||||
ExecValue(ArraySpan array) // NOLINT implicit conversion
|
||||
: array(std::move(array)) {}
|
||||
|
||||
ExecValue(const ArrayData& array) { // NOLINT implicit conversion
|
||||
this->array.SetMembers(array);
|
||||
}
|
||||
|
||||
ExecValue() = default;
|
||||
ExecValue(const ExecValue& other) = default;
|
||||
ExecValue& operator=(const ExecValue& other) = default;
|
||||
ExecValue(ExecValue&& other) = default;
|
||||
ExecValue& operator=(ExecValue&& other) = default;
|
||||
|
||||
int64_t length() const { return this->is_array() ? this->array.length : 1; }
|
||||
|
||||
bool is_array() const { return this->scalar == NULLPTR; }
|
||||
bool is_scalar() const { return !this->is_array(); }
|
||||
|
||||
void SetArray(const ArrayData& array) {
|
||||
this->array.SetMembers(array);
|
||||
this->scalar = NULLPTR;
|
||||
}
|
||||
|
||||
void SetScalar(const Scalar* scalar) { this->scalar = scalar; }
|
||||
|
||||
template <typename ExactType>
|
||||
const ExactType& scalar_as() const {
|
||||
return ::arrow::internal::checked_cast<const ExactType&>(*this->scalar);
|
||||
}
|
||||
|
||||
/// XXX: here temporarily for compatibility with datum, see
|
||||
/// e.g. MakeStructExec in scalar_nested.cc
|
||||
int64_t null_count() const {
|
||||
if (this->is_array()) {
|
||||
return this->array.GetNullCount();
|
||||
} else {
|
||||
return this->scalar->is_valid ? 0 : 1;
|
||||
}
|
||||
}
|
||||
|
||||
const DataType* type() const {
|
||||
if (this->is_array()) {
|
||||
return array.type;
|
||||
} else {
|
||||
return scalar->type.get();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
struct ARROW_EXPORT ExecResult {
|
||||
// The default value of the variant is ArraySpan
|
||||
std::variant<ArraySpan, std::shared_ptr<ArrayData>> value;
|
||||
|
||||
int64_t length() const {
|
||||
if (this->is_array_span()) {
|
||||
return this->array_span()->length;
|
||||
} else {
|
||||
return this->array_data()->length;
|
||||
}
|
||||
}
|
||||
|
||||
const DataType* type() const {
|
||||
if (this->is_array_span()) {
|
||||
return this->array_span()->type;
|
||||
} else {
|
||||
return this->array_data()->type.get();
|
||||
}
|
||||
}
|
||||
|
||||
ArraySpan* array_span() const {
|
||||
return const_cast<ArraySpan*>(&std::get<ArraySpan>(this->value));
|
||||
}
|
||||
bool is_array_span() const { return this->value.index() == 0; }
|
||||
|
||||
const std::shared_ptr<ArrayData>& array_data() const {
|
||||
return std::get<std::shared_ptr<ArrayData>>(this->value);
|
||||
}
|
||||
|
||||
bool is_array_data() const { return this->value.index() == 1; }
|
||||
};
|
||||
|
||||
/// \brief A "lightweight" column batch object which contains no
|
||||
/// std::shared_ptr objects and does not have any memory ownership
|
||||
/// semantics. Can represent a view onto an "owning" ExecBatch.
|
||||
struct ARROW_EXPORT ExecSpan {
|
||||
ExecSpan() = default;
|
||||
ExecSpan(const ExecSpan& other) = default;
|
||||
ExecSpan& operator=(const ExecSpan& other) = default;
|
||||
ExecSpan(ExecSpan&& other) = default;
|
||||
ExecSpan& operator=(ExecSpan&& other) = default;
|
||||
|
||||
explicit ExecSpan(std::vector<ExecValue> values, int64_t length)
|
||||
: length(length), values(std::move(values)) {}
|
||||
|
||||
explicit ExecSpan(const ExecBatch& batch) {
|
||||
this->length = batch.length;
|
||||
this->values.resize(batch.values.size());
|
||||
for (size_t i = 0; i < batch.values.size(); ++i) {
|
||||
const Datum& in_value = batch[i];
|
||||
ExecValue* out_value = &this->values[i];
|
||||
if (in_value.is_array()) {
|
||||
out_value->SetArray(*in_value.array());
|
||||
} else {
|
||||
out_value->SetScalar(in_value.scalar().get());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// \brief Return the value at the i-th index
|
||||
template <typename index_type>
|
||||
inline const ExecValue& operator[](index_type i) const {
|
||||
return values[i];
|
||||
}
|
||||
|
||||
/// \brief A convenience for the number of values / arguments.
|
||||
int num_values() const { return static_cast<int>(values.size()); }
|
||||
|
||||
std::vector<TypeHolder> GetTypes() const {
|
||||
std::vector<TypeHolder> result;
|
||||
for (const auto& value : this->values) {
|
||||
result.emplace_back(value.type());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
ExecBatch ToExecBatch() const {
|
||||
ExecBatch result;
|
||||
result.length = this->length;
|
||||
for (const ExecValue& value : this->values) {
|
||||
if (value.is_array()) {
|
||||
result.values.push_back(value.array.ToArrayData());
|
||||
} else {
|
||||
result.values.push_back(value.scalar->GetSharedPtr());
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
int64_t length = 0;
|
||||
std::vector<ExecValue> values;
|
||||
};
|
||||
|
||||
/// @}
|
||||
|
||||
/// \defgroup compute-call-function One-shot calls to compute functions
|
||||
///
|
||||
/// @{
|
||||
|
||||
/// \brief One-shot invoker for all types of functions.
|
||||
///
|
||||
/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs,
|
||||
/// and wrapping of outputs.
|
||||
ARROW_EXPORT
|
||||
Result<Datum> CallFunction(const std::string& func_name, const std::vector<Datum>& args,
|
||||
const FunctionOptions* options, ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Variant of CallFunction which uses a function's default options.
|
||||
///
|
||||
/// NB: Some functions require FunctionOptions be provided.
|
||||
ARROW_EXPORT
|
||||
Result<Datum> CallFunction(const std::string& func_name, const std::vector<Datum>& args,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief One-shot invoker for all types of functions.
|
||||
///
|
||||
/// Does kernel dispatch, argument checking, iteration of ChunkedArray inputs,
|
||||
/// and wrapping of outputs.
|
||||
ARROW_EXPORT
|
||||
Result<Datum> CallFunction(const std::string& func_name, const ExecBatch& batch,
|
||||
const FunctionOptions* options, ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// \brief Variant of CallFunction which uses a function's default options.
|
||||
///
|
||||
/// NB: Some functions require FunctionOptions be provided.
|
||||
ARROW_EXPORT
|
||||
Result<Datum> CallFunction(const std::string& func_name, const ExecBatch& batch,
|
||||
ExecContext* ctx = NULLPTR);
|
||||
|
||||
/// @}
|
||||
|
||||
/// \defgroup compute-function-executor One-shot calls to obtain function executors
|
||||
///
|
||||
/// @{
|
||||
|
||||
/// \brief One-shot executor provider for all types of functions.
|
||||
///
|
||||
/// This function creates and initializes a `FunctionExecutor` appropriate
|
||||
/// for the given function name, input types and function options.
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<FunctionExecutor>> GetFunctionExecutor(
|
||||
const std::string& func_name, std::vector<TypeHolder> in_types,
|
||||
const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR);
|
||||
|
||||
/// \brief One-shot executor provider for all types of functions.
|
||||
///
|
||||
/// This function creates and initializes a `FunctionExecutor` appropriate
|
||||
/// for the given function name, input types (taken from the Datum arguments)
|
||||
/// and function options.
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<FunctionExecutor>> GetFunctionExecutor(
|
||||
const std::string& func_name, const std::vector<Datum>& args,
|
||||
const FunctionOptions* options = NULLPTR, FunctionRegistry* func_registry = NULLPTR);
|
||||
|
||||
/// @}
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,57 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/exec.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace util {
|
||||
using arrow::compute::ExecBatch;
|
||||
|
||||
/// \brief A container that accumulates batches until they are ready to
|
||||
/// be processed.
|
||||
class AccumulationQueue {
|
||||
public:
|
||||
AccumulationQueue() : row_count_(0) {}
|
||||
~AccumulationQueue() = default;
|
||||
|
||||
// We should never be copying ExecBatch around
|
||||
AccumulationQueue(const AccumulationQueue&) = delete;
|
||||
AccumulationQueue& operator=(const AccumulationQueue&) = delete;
|
||||
|
||||
AccumulationQueue(AccumulationQueue&& that);
|
||||
AccumulationQueue& operator=(AccumulationQueue&& that);
|
||||
|
||||
void Concatenate(AccumulationQueue&& that);
|
||||
void InsertBatch(ExecBatch batch);
|
||||
int64_t row_count() { return row_count_; }
|
||||
size_t batch_count() { return batches_.size(); }
|
||||
bool empty() const { return batches_.empty(); }
|
||||
void Clear();
|
||||
ExecBatch& operator[](size_t i);
|
||||
|
||||
private:
|
||||
int64_t row_count_;
|
||||
std::vector<ExecBatch> batches_;
|
||||
};
|
||||
|
||||
} // namespace util
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,59 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/api_aggregate.h"
|
||||
#include "arrow/compute/exec.h"
|
||||
#include "arrow/compute/exec/exec_plan.h"
|
||||
#include "arrow/compute/kernel.h"
|
||||
#include "arrow/datum.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
namespace internal {
|
||||
|
||||
/// Internal use only: helper function for testing HashAggregateKernels.
|
||||
/// For public use see arrow::compute::Grouper or create an execution plan
|
||||
/// and use an aggregate node.
|
||||
ARROW_EXPORT
|
||||
Result<Datum> GroupBy(const std::vector<Datum>& arguments, const std::vector<Datum>& keys,
|
||||
const std::vector<Aggregate>& aggregates, bool use_threads = false,
|
||||
ExecContext* ctx = default_exec_context());
|
||||
|
||||
Result<std::vector<const HashAggregateKernel*>> GetKernels(
|
||||
ExecContext* ctx, const std::vector<Aggregate>& aggregates,
|
||||
const std::vector<TypeHolder>& in_types);
|
||||
|
||||
Result<std::vector<std::unique_ptr<KernelState>>> InitKernels(
|
||||
const std::vector<const HashAggregateKernel*>& kernels, ExecContext* ctx,
|
||||
const std::vector<Aggregate>& aggregates, const std::vector<TypeHolder>& in_types);
|
||||
|
||||
Result<FieldVector> ResolveKernels(
|
||||
const std::vector<Aggregate>& aggregates,
|
||||
const std::vector<const HashAggregateKernel*>& kernels,
|
||||
const std::vector<std::unique_ptr<KernelState>>& states, ExecContext* ctx,
|
||||
const std::vector<TypeHolder>& in_types);
|
||||
|
||||
} // namespace internal
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,37 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/exec.h"
|
||||
#include "arrow/compute/exec/options.h"
|
||||
#include "arrow/type.h"
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
namespace asofjoin {
|
||||
|
||||
using AsofJoinKeys = AsofJoinNodeOptions::Keys;
|
||||
|
||||
ARROW_EXPORT Result<std::shared_ptr<Schema>> MakeOutputSchema(
|
||||
const std::vector<std::shared_ptr<Schema>>& input_schema,
|
||||
const std::vector<AsofJoinKeys>& input_keys);
|
||||
|
||||
} // namespace asofjoin
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,44 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#include <cstdint>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "benchmark/benchmark.h"
|
||||
|
||||
#include "arrow/compute/exec.h"
|
||||
#include "arrow/compute/exec/exec_plan.h"
|
||||
#include "arrow/compute/exec/test_util.h"
|
||||
|
||||
namespace arrow {
|
||||
|
||||
namespace compute {
|
||||
|
||||
Status BenchmarkNodeOverhead(benchmark::State& state, int32_t num_batches,
|
||||
int32_t batch_size, arrow::compute::BatchesWithSchema data,
|
||||
std::vector<arrow::compute::Declaration>& node_declarations);
|
||||
|
||||
Status BenchmarkIsolatedNodeOverhead(benchmark::State& state,
|
||||
arrow::compute::Expression expr, int32_t num_batches,
|
||||
int32_t batch_size,
|
||||
arrow::compute::BatchesWithSchema data,
|
||||
std::string factory_name,
|
||||
arrow::compute::ExecNodeOptions& options);
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,324 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#if defined(ARROW_HAVE_AVX2)
|
||||
#include <immintrin.h>
|
||||
#endif
|
||||
|
||||
#include <atomic>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include "arrow/compute/exec/partition_util.h"
|
||||
#include "arrow/compute/exec/util.h"
|
||||
#include "arrow/memory_pool.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/status.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
// A set of pre-generated bit masks from a 64-bit word.
|
||||
//
|
||||
// It is used to map selected bits of hash to a bit mask that will be used in
|
||||
// a Bloom filter.
|
||||
//
|
||||
// These bit masks need to look random and need to have a similar fractions of
|
||||
// bits set in order for a Bloom filter to have a low false positives rate.
|
||||
//
|
||||
struct ARROW_EXPORT BloomFilterMasks {
  // Generate all masks as a single bit vector. Each bit offset in this bit
  // vector corresponds to a single mask.
  // In each consecutive kBitsPerMask bits, there must be between
  // kMinBitsSet and kMaxBitsSet bits set.
  //
  BloomFilterMasks();

  // Extract the kBitsPerMask-bit mask starting at an arbitrary bit offset,
  // using a single unaligned 64-bit load (byte-swapped first on big-endian
  // targets so the shift/mask below sees the same bit layout).
  inline uint64_t mask(int bit_offset) {
#if ARROW_LITTLE_ENDIAN
    return (util::SafeLoadAs<uint64_t>(masks_ + bit_offset / 8) >> (bit_offset % 8)) &
           kFullMask;
#else
    return (BYTESWAP(util::SafeLoadAs<uint64_t>(masks_ + bit_offset / 8)) >>
            (bit_offset % 8)) &
           kFullMask;
#endif
  }

  // Masks are 57 bits long because then they can be accessed at an
  // arbitrary bit offset using a single unaligned 64-bit load instruction.
  //
  static constexpr int kBitsPerMask = 57;
  // All-ones value covering exactly kBitsPerMask low bits.
  static constexpr uint64_t kFullMask = (1ULL << kBitsPerMask) - 1;

  // Minimum and maximum number of bits set in each mask.
  // This constraint is enforced when generating the bit masks.
  // Values should be close to each other and chosen as to minimize a Bloom
  // filter false positives rate.
  //
  static constexpr int kMinBitsSet = 4;
  static constexpr int kMaxBitsSet = 5;

  // Number of generated masks.
  // Having more masks to choose will improve false positives rate of Bloom
  // filter but will also use more memory, which may lead to more CPU cache
  // misses.
  // The chosen value results in using only a few cache-lines for mask lookups,
  // while providing a good variety of available bit masks.
  //
  static constexpr int kLogNumMasks = 10;
  static constexpr int kNumMasks = 1 << kLogNumMasks;

  // Data of masks. Masks are stored in a single bit vector. Nth mask is
  // kBitsPerMask bits starting at bit offset N.
  //
  // The extra 64 bits of slack allow the unaligned 64-bit load in mask()
  // to stay in bounds for every valid bit offset.
  static constexpr int kTotalBytes = (kNumMasks + 64) / 8;
  uint8_t masks_[kTotalBytes];
};
|
||||
|
||||
// A variant of a blocked Bloom filter implementation.
|
||||
// A Bloom filter is a data structure that provides approximate membership test
|
||||
// functionality based only on the hash of the key. Membership test may return
|
||||
// false positives but not false negatives. Approximation of the result allows
|
||||
// in general case (for arbitrary data types of keys) to save on both memory and
|
||||
// lookup cost compared to the accurate membership test.
|
||||
// The accurate test may sometimes still be cheaper for a specific data types
|
||||
// and inputs, e.g. integers from a small range.
|
||||
//
|
||||
// This blocked Bloom filter is optimized for use in hash joins, to achieve a
|
||||
// good balance between the size of the filter, the cost of its building and
|
||||
// querying and the rate of false positives.
|
||||
//
|
||||
class ARROW_EXPORT BlockedBloomFilter {
  friend class BloomFilterBuilder_SingleThreaded;
  friend class BloomFilterBuilder_Parallel;

 public:
  BlockedBloomFilter() : log_num_blocks_(0), num_blocks_(0), blocks_(NULLPTR) {}

  // Membership test for a single hash: true iff every bit of the selected
  // mask is set in the selected 64-bit block. Per the blocked Bloom filter
  // contract, this may return false positives but never false negatives.
  inline bool Find(uint64_t hash) const {
    uint64_t m = mask(hash);
    uint64_t b = blocks_[block_id(hash)];
    return (b & m) == m;
  }

  // Batched membership test writing one result bit per input hash.
  // Uses SIMD if available for smaller Bloom filters.
  // Uses memory prefetching for larger Bloom filters.
  //
  void Find(int64_t hardware_flags, int64_t num_rows, const uint32_t* hashes,
            uint8_t* result_bit_vector, bool enable_prefetch = true) const;
  void Find(int64_t hardware_flags, int64_t num_rows, const uint64_t* hashes,
            uint8_t* result_bit_vector, bool enable_prefetch = true) const;

  int log_num_blocks() const { return log_num_blocks_; }

  int NumHashBitsUsed() const;

  bool IsSameAs(const BlockedBloomFilter* other) const;

  int64_t NumBitsSet() const;

  // Folding of a block Bloom filter after the initial version
  // has been built.
  //
  // One of the parameters for creation of Bloom filter is the number
  // of bits allocated for it. The more bits allocated, the lower the
  // probability of false positives. A good heuristic is to aim for
  // half of the bits set in the constructed Bloom filter. This should
  // result in a good trade off between size (and following cost of
  // memory accesses) and false positives rate.
  //
  // There might have been many duplicate keys in the input provided
  // to Bloom filter builder. In that case the resulting bit vector
  // would be more sparse then originally intended. It is possible to
  // easily correct that and cut in half the size of Bloom filter
  // after it has already been constructed. The process to do that is
  // approximately equal to OR-ing bits from upper and lower half (the
  // way we address these bits when inserting or querying a hash makes
  // such folding in half possible).
  //
  // We will keep folding as long as the fraction of bits set is less
  // than 1/4. The resulting bit vector density should be in the [1/4,
  // 1/2) range.
  //
  void Fold();

 private:
  Status CreateEmpty(int64_t num_rows_to_insert, MemoryPool* pool);

  // Record a single hash: OR the selected mask into the selected block.
  inline void Insert(uint64_t hash) {
    uint64_t m = mask(hash);
    uint64_t& b = blocks_[block_id(hash)];
    b |= m;
  }

  void Insert(int64_t hardware_flags, int64_t num_rows, const uint32_t* hashes);
  void Insert(int64_t hardware_flags, int64_t num_rows, const uint64_t* hashes);

  // Derive the 57-bit block mask for a hash: pick one of the pre-generated
  // masks, then rotate it by a hash-derived amount.
  inline uint64_t mask(uint64_t hash) const {
    // The lowest bits of hash are used to pick mask index.
    //
    int mask_id = static_cast<int>(hash & (BloomFilterMasks::kNumMasks - 1));
    uint64_t result = masks_.mask(mask_id);

    // The next set of hash bits is used to pick the amount of bit
    // rotation of the mask.
    //
    int rotation = (hash >> BloomFilterMasks::kLogNumMasks) & 63;
    result = ROTL64(result, rotation);

    return result;
  }

  inline int64_t block_id(uint64_t hash) const {
    // The next set of hash bits following the bits used to select a
    // mask is used to pick block id (index of 64-bit word in a bit
    // vector). num_blocks_ is a power of two, so the AND is a cheap modulo.
    //
    return (hash >> (BloomFilterMasks::kLogNumMasks + 6)) & (num_blocks_ - 1);
  }

  template <typename T>
  inline void InsertImp(int64_t num_rows, const T* hashes);

  template <typename T>
  inline void FindImp(int64_t num_rows, const T* hashes, uint8_t* result_bit_vector,
                      bool enable_prefetch) const;

  void SingleFold(int num_folds);

#if defined(ARROW_HAVE_AVX2)
  inline __m256i mask_avx2(__m256i hash) const;
  inline __m256i block_id_avx2(__m256i hash) const;
  int64_t Insert_avx2(int64_t num_rows, const uint32_t* hashes);
  int64_t Insert_avx2(int64_t num_rows, const uint64_t* hashes);
  template <typename T>
  int64_t InsertImp_avx2(int64_t num_rows, const T* hashes);
  int64_t Find_avx2(int64_t num_rows, const uint32_t* hashes,
                    uint8_t* result_bit_vector) const;
  int64_t Find_avx2(int64_t num_rows, const uint64_t* hashes,
                    uint8_t* result_bit_vector) const;
  template <typename T>
  int64_t FindImp_avx2(int64_t num_rows, const T* hashes,
                       uint8_t* result_bit_vector) const;
#endif

  // Prefetch only once the filter exceeds this size — NOTE(review): the
  // 256 KiB threshold presumably approximates a typical L2 cache; confirm.
  bool UsePrefetch() const {
    return num_blocks_ * sizeof(uint64_t) > kPrefetchLimitBytes;
  }

  static constexpr int64_t kPrefetchLimitBytes = 256 * 1024;

  static BloomFilterMasks masks_;

  // Total number of bits used by block Bloom filter must be a power
  // of 2.
  //
  int log_num_blocks_;
  int64_t num_blocks_;

  // Buffer allocated to store an array of power of 2 64-bit blocks.
  //
  std::shared_ptr<Buffer> buf_;
  // Pointer to mutable data owned by Buffer
  //
  uint64_t* blocks_;
};
|
||||
|
||||
// We have two separate implementations of building a Bloom filter, multi-threaded and
|
||||
// single-threaded.
|
||||
//
|
||||
// Single threaded version is useful in two ways:
|
||||
// a) It allows to verify parallel implementation in tests (the single threaded one is
|
||||
// simpler and can be used as the source of truth).
|
||||
// b) It is preferred for small and medium size Bloom filters, because it skips extra
|
||||
// synchronization related steps from parallel variant (partitioning and taking locks).
|
||||
//
|
||||
enum class BloomFilterBuildStrategy {
  // Build on a single thread; skips partitioning and locking overhead.
  SINGLE_THREADED = 0,
  // Build cooperatively across threads (uses partitioning and locks).
  PARALLEL = 1,
};
|
||||
|
||||
// Abstract interface for constructing a BlockedBloomFilter from batches of
// pre-computed hashes, with single-threaded and parallel implementations.
class ARROW_EXPORT BloomFilterBuilder {
 public:
  virtual ~BloomFilterBuilder() = default;
  // Prepare for a build targeting `build_target`. Must be called before
  // PushNextBatch. NOTE(review): presumably allocates/sizes the target for
  // `num_rows` — confirm against the implementations in the .cc file.
  virtual Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
                       int64_t num_rows, int64_t num_batches,
                       BlockedBloomFilter* build_target) = 0;
  // Number of parallel tasks; the default 0 is for builders that do not
  // parallelize.
  virtual int64_t num_tasks() const { return 0; }
  // Insert a batch of `num_rows` pre-computed 32-bit hashes.
  virtual Status PushNextBatch(size_t thread_index, int64_t num_rows,
                               const uint32_t* hashes) = 0;
  // Insert a batch of `num_rows` pre-computed 64-bit hashes.
  virtual Status PushNextBatch(size_t thread_index, int64_t num_rows,
                               const uint64_t* hashes) = 0;
  // Release any temporary per-build state; no-op by default.
  virtual void CleanUp() {}
  // Factory returning the concrete builder for the requested strategy.
  static std::unique_ptr<BloomFilterBuilder> Make(BloomFilterBuildStrategy strategy);
};
|
||||
|
||||
// Single-threaded Bloom filter builder: inserts hashes directly into the
// build target with no partitioning or locking.
class ARROW_EXPORT BloomFilterBuilder_SingleThreaded : public BloomFilterBuilder {
 public:
  Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
               int64_t num_rows, int64_t num_batches,
               BlockedBloomFilter* build_target) override;

  Status PushNextBatch(size_t /*thread_index*/, int64_t num_rows,
                       const uint32_t* hashes) override;

  Status PushNextBatch(size_t /*thread_index*/, int64_t num_rows,
                       const uint64_t* hashes) override;

 private:
  // Shared 32/64-bit implementation behind both PushNextBatch overloads.
  template <typename T>
  void PushNextBatchImp(int64_t num_rows, const T* hashes);

  // Default-initialized so reads before Begin() are defined behavior
  // (previously these members were left uninitialized).
  int64_t hardware_flags_ = 0;
  BlockedBloomFilter* build_target_ = NULLPTR;
};
|
||||
|
||||
// Parallel Bloom filter builder: hashes are partitioned per thread and
// inserted under partition locks.
class ARROW_EXPORT BloomFilterBuilder_Parallel : public BloomFilterBuilder {
 public:
  Status Begin(size_t num_threads, int64_t hardware_flags, MemoryPool* pool,
               int64_t num_rows, int64_t num_batches,
               BlockedBloomFilter* build_target) override;

  Status PushNextBatch(size_t thread_id, int64_t num_rows,
                       const uint32_t* hashes) override;

  Status PushNextBatch(size_t thread_id, int64_t num_rows,
                       const uint64_t* hashes) override;

  void CleanUp() override;

 private:
  // Shared 32/64-bit implementation behind both PushNextBatch overloads.
  template <typename T>
  void PushNextBatchImp(size_t thread_id, int64_t num_rows, const T* hashes);

  // Default-initialized so reads before Begin() are defined behavior
  // (previously these members were left uninitialized).
  int64_t hardware_flags_ = 0;
  BlockedBloomFilter* build_target_ = NULLPTR;
  int log_num_prtns_ = 0;
  // Scratch space owned by each worker thread while partitioning hashes.
  struct ThreadLocalState {
    std::vector<uint32_t> partitioned_hashes_32;
    std::vector<uint64_t> partitioned_hashes_64;
    std::vector<uint16_t> partition_ranges;
    std::vector<int> unprocessed_partition_ids;
  };
  std::vector<ThreadLocalState> thread_local_states_;
  PartitionLocks prtn_locks_;
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,591 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/exec.h"
|
||||
#include "arrow/compute/type_fwd.h"
|
||||
#include "arrow/type_fwd.h"
|
||||
#include "arrow/util/future.h"
|
||||
#include "arrow/util/macros.h"
|
||||
#include "arrow/util/tracing.h"
|
||||
#include "arrow/util/type_fwd.h"
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {
|
||||
|
||||
namespace compute {
|
||||
|
||||
/// \addtogroup execnode-components
|
||||
/// @{
|
||||
|
||||
// A directed acyclic graph of ExecNodes that can be validated, started and
// stopped as a unit.
class ARROW_EXPORT ExecPlan : public std::enable_shared_from_this<ExecPlan> {
 public:
  // This allows operators to rely on signed 16-bit indices
  static const uint32_t kMaxBatchSize = 1 << 15;
  using NodeVector = std::vector<ExecNode*>;

  virtual ~ExecPlan() = default;

  // Accessor for the plan's query-level context (defined in the .cc file).
  QueryContext* query_context();

  /// Make an empty exec plan
  static Result<std::shared_ptr<ExecPlan>> Make(
      QueryOptions options, ExecContext exec_context = *threaded_exec_context(),
      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);

  static Result<std::shared_ptr<ExecPlan>> Make(
      ExecContext exec_context = *threaded_exec_context(),
      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);

  static Result<std::shared_ptr<ExecPlan>> Make(
      QueryOptions options, ExecContext* exec_context,
      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);

  static Result<std::shared_ptr<ExecPlan>> Make(
      ExecContext* exec_context,
      std::shared_ptr<const KeyValueMetadata> metadata = NULLPTR);

  // Transfer ownership of `node` to the plan; returns a non-owning pointer.
  ExecNode* AddNode(std::unique_ptr<ExecNode> node);

  // Construct a Node in place and register it with the plan; returns a
  // non-owning pointer valid for the plan's lifetime.
  template <typename Node, typename... Args>
  Node* EmplaceNode(Args&&... args) {
    std::unique_ptr<Node> node{new Node{std::forward<Args>(args)...}};
    auto out = node.get();
    AddNode(std::move(node));
    return out;
  }

  /// The initial inputs
  const NodeVector& sources() const;

  /// The final outputs
  const NodeVector& sinks() const;

  // Check the plan's internal consistency before starting it.
  Status Validate();

  /// \brief Start producing on all nodes
  ///
  /// Nodes are started in reverse topological order, such that any node
  /// is started before all of its inputs.
  Status StartProducing();

  /// \brief Stop producing on all nodes
  ///
  /// Nodes are stopped in topological order, such that any node
  /// is stopped before all of its outputs.
  void StopProducing();

  /// \brief A future which will be marked finished when all nodes have stopped producing.
  Future<> finished();

  /// \brief Return whether the plan has non-empty metadata
  bool HasMetadata() const;

  /// \brief Return the plan's attached metadata
  std::shared_ptr<const KeyValueMetadata> metadata() const;

  // Human-readable rendering of the plan, for debugging/logging.
  std::string ToString() const;
};
|
||||
|
||||
class ARROW_EXPORT ExecNode {
 public:
  using NodeVector = std::vector<ExecNode*>;

  virtual ~ExecNode() = default;

  /// Short name identifying this kind of node (used in string representations).
  virtual const char* kind_name() const = 0;

  // The number of inputs/outputs expected by this node
  int num_inputs() const { return static_cast<int>(inputs_.size()); }
  int num_outputs() const { return num_outputs_; }

  /// This node's predecessors in the exec plan
  const NodeVector& inputs() const { return inputs_; }

  /// \brief Labels identifying the function of each input.
  const std::vector<std::string>& input_labels() const { return input_labels_; }

  /// This node's successors in the exec plan
  const NodeVector& outputs() const { return outputs_; }

  /// The datatypes for batches produced by this node
  const std::shared_ptr<Schema>& output_schema() const { return output_schema_; }

  /// This node's exec plan
  ExecPlan* plan() { return plan_; }

  /// \brief An optional label, for display and debugging
  ///
  /// There is no guarantee that this value is non-empty or unique.
  const std::string& label() const { return label_; }
  void SetLabel(std::string label) { label_ = std::move(label); }

  /// \brief Validate this node, returning an error Status on failure
  Status Validate() const;

  /// Upstream API:
  /// These functions are called by input nodes that want to inform this node
  /// about an updated condition (a new input batch, an error, an impending
  /// end of stream).
  ///
  /// Implementation rules:
  /// - these may be called anytime after StartProducing() has succeeded
  ///   (and even during or after StopProducing())
  /// - these may be called concurrently
  /// - these are allowed to call back into PauseProducing(), ResumeProducing()
  ///   and StopProducing()

  /// Transfer input batch to ExecNode
  virtual void InputReceived(ExecNode* input, ExecBatch batch) = 0;

  /// Signal error to ExecNode
  virtual void ErrorReceived(ExecNode* input, Status error) = 0;

  /// Mark the inputs finished after the given number of batches.
  ///
  /// This may be called before all inputs are received.  This simply fixes
  /// the total number of incoming batches for an input, so that the ExecNode
  /// knows when it has received all input, regardless of order.
  virtual void InputFinished(ExecNode* input, int total_batches) = 0;

  /// \brief Perform any needed initialization
  ///
  /// This hook performs any actions in between creation of ExecPlan and the call to
  /// StartProducing. An example could be Bloom filter pushdown. The order of ExecNodes
  /// that executes this method is undefined, but the calls are made synchronously.
  ///
  /// At this point a node can rely on all inputs & outputs (and the input schemas)
  /// being well defined.
  virtual Status Init();

  /// Lifecycle API:
  /// - start / stop to initiate and terminate production
  /// - pause / resume to apply backpressure
  ///
  /// Implementation rules:
  /// - StartProducing() should not recurse into the inputs, as it is
  ///   handled by ExecPlan::StartProducing()
  /// - PauseProducing(), ResumeProducing(), StopProducing() may be called
  ///   concurrently (but only after StartProducing() has returned successfully)
  /// - PauseProducing(), ResumeProducing(), StopProducing() may be called
  ///   by the downstream nodes' InputReceived(), ErrorReceived(), InputFinished()
  ///   methods
  /// - StopProducing() should recurse into the inputs
  /// - StopProducing() must be idempotent

  // XXX What happens if StartProducing() calls an output's InputReceived()
  // synchronously, and InputReceived() decides to call back into StopProducing()
  // (or PauseProducing()) because it received enough data?
  //
  // Right now, since synchronous calls happen in both directions (input to
  // output and then output to input), a node must be careful to be reentrant
  // against synchronous calls from its output, *and* also concurrent calls from
  // other threads.  The most reliable solution is to update the internal state
  // first, and notify outputs only at the end.
  //
  // Alternate rules:
  // - StartProducing(), ResumeProducing() can call synchronously into
  //   its outputs' consuming methods (InputReceived() etc.)
  // - InputReceived(), ErrorReceived(), InputFinished() can call asynchronously
  //   into its inputs' PauseProducing(), StopProducing()
  //
  // Alternate API:
  // - InputReceived(), ErrorReceived(), InputFinished() return a ProductionHint
  //   enum: either None (default), PauseProducing, ResumeProducing, StopProducing
  // - A method allows passing a ProductionHint asynchronously from an output node
  //   (replacing PauseProducing(), ResumeProducing(), StopProducing())

  // Concurrent calls to PauseProducing and ResumeProducing can be hard to sequence
  // as they may travel at different speeds through the plan.
  //
  // For example, consider a resume that comes quickly after a pause.  If the source
  // receives the resume before the pause the source may think the destination is full
  // and halt production which would lead to deadlock.
  //
  // To resolve this a counter is sent for all calls to pause/resume.  Only the call with
  // the highest counter value is valid.  So if a call to PauseProducing(5) comes after
  // a call to ResumeProducing(6) then the source should continue producing.
  //
  // If a node has multiple outputs it should emit a new counter value to its inputs
  // whenever any of its outputs changes which means the counters sent to inputs may be
  // larger than the counters received on its outputs.
  //
  // A node with multiple outputs will also need to ensure it is applying backpressure if
  // any of its outputs is asking to pause

  /// \brief Start producing
  ///
  /// This must only be called once.  If this fails, then other lifecycle
  /// methods must not be called.
  ///
  /// This is typically called automatically by ExecPlan::StartProducing().
  virtual Status StartProducing() = 0;

  /// \brief Pause producing temporarily
  ///
  /// \param output Pointer to the output that is full
  /// \param counter Counter used to sequence calls to pause/resume
  ///
  /// This call is a hint that an output node is currently not willing
  /// to receive data.
  ///
  /// This may be called any number of times after StartProducing() succeeds.
  /// However, the node is still free to produce data (which may be difficult
  /// to prevent anyway if data is produced using multiple threads).
  virtual void PauseProducing(ExecNode* output, int32_t counter) = 0;

  /// \brief Resume producing after a temporary pause
  ///
  /// \param output Pointer to the output that is now free
  /// \param counter Counter used to sequence calls to pause/resume
  ///
  /// This call is a hint that an output node is willing to receive data again.
  ///
  /// This may be called any number of times after StartProducing() succeeds.
  virtual void ResumeProducing(ExecNode* output, int32_t counter) = 0;

  /// \brief Stop producing definitively to a single output
  ///
  /// This call is a hint that an output node has completed and is not willing
  /// to receive any further data.
  virtual void StopProducing(ExecNode* output) = 0;

  /// \brief Stop producing definitively to all outputs
  virtual void StopProducing() = 0;

  /// \brief A future which will be marked finished when this node has stopped producing.
  virtual Future<> finished() { return finished_; }

  /// \brief A string representation of this node (for display/debugging)
  std::string ToString(int indent = 0) const;

 protected:
  ExecNode(ExecPlan* plan, NodeVector inputs, std::vector<std::string> input_labels,
           std::shared_ptr<Schema> output_schema, int num_outputs);

  // A helper method to send an error status to all outputs.
  // Returns true if the status was an error.
  bool ErrorIfNotOk(Status status);

  /// Provide extra info to include in the string representation.
  virtual std::string ToStringExtra(int indent) const;

  ExecPlan* plan_;
  std::string label_;

  NodeVector inputs_;
  std::vector<std::string> input_labels_;

  std::shared_ptr<Schema> output_schema_;
  int num_outputs_;
  NodeVector outputs_;

  // Future to sync finished
  Future<> finished_ = Future<>::Make();

  // Tracing span for this node (see util::tracing).
  util::tracing::Span span_;
};
|
||||
|
||||
/// \brief An extensible registry for factories of ExecNodes
class ARROW_EXPORT ExecFactoryRegistry {
 public:
  /// A factory produces an ExecNode attached to the given plan, from the given
  /// input nodes and node options.
  using Factory = std::function<Result<ExecNode*>(ExecPlan*, std::vector<ExecNode*>,
                                                  const ExecNodeOptions&)>;

  virtual ~ExecFactoryRegistry() = default;

  /// \brief Get the named factory from this registry
  ///
  /// will raise if factory_name is not found
  virtual Result<Factory> GetFactory(const std::string& factory_name) = 0;

  /// \brief Add a factory to this registry with the provided name
  ///
  /// will raise if factory_name is already in the registry
  virtual Status AddFactory(std::string factory_name, Factory factory) = 0;
};
|
||||
|
||||
/// The default registry, which includes built-in factories.
///
/// Used as the default `registry` argument of MakeExecNode and
/// Declaration::AddToPlan/IsValid.
ARROW_EXPORT
ExecFactoryRegistry* default_exec_factory_registry();
|
||||
|
||||
/// \brief Construct an ExecNode using the named factory
|
||||
inline Result<ExecNode*> MakeExecNode(
|
||||
const std::string& factory_name, ExecPlan* plan, std::vector<ExecNode*> inputs,
|
||||
const ExecNodeOptions& options,
|
||||
ExecFactoryRegistry* registry = default_exec_factory_registry()) {
|
||||
ARROW_ASSIGN_OR_RAISE(auto factory, registry->GetFactory(factory_name));
|
||||
return factory(plan, std::move(inputs), options);
|
||||
}
|
||||
|
||||
/// \brief Helper class for declaring sets of ExecNodes efficiently
///
/// A Declaration represents an unconstructed ExecNode (and potentially more since its
/// inputs may also be Declarations). The node can be constructed and added to a plan
/// with Declaration::AddToPlan, which will recursively construct any inputs as necessary.
struct ARROW_EXPORT Declaration {
  /// An input is either an already-constructed node or a nested Declaration.
  using Input = std::variant<ExecNode*, Declaration>;

  Declaration() {}

  Declaration(std::string factory_name, std::vector<Input> inputs,
              std::shared_ptr<ExecNodeOptions> options, std::string label)
      : factory_name{std::move(factory_name)},
        inputs{std::move(inputs)},
        options{std::move(options)},
        label{std::move(label)} {}

  /// Convenience constructor: wraps concrete Options in a shared_ptr.
  template <typename Options>
  Declaration(std::string factory_name, std::vector<Input> inputs, Options options,
              std::string label)
      : Declaration{std::move(factory_name), std::move(inputs),
                    std::shared_ptr<ExecNodeOptions>(
                        std::make_shared<Options>(std::move(options))),
                    std::move(label)} {}

  /// Convenience constructor: no label.
  template <typename Options>
  Declaration(std::string factory_name, std::vector<Input> inputs, Options options)
      : Declaration{std::move(factory_name), std::move(inputs), std::move(options),
                    /*label=*/""} {}

  /// Convenience constructor: no inputs, no label.
  template <typename Options>
  Declaration(std::string factory_name, Options options)
      : Declaration{std::move(factory_name), {}, std::move(options), /*label=*/""} {}

  /// Convenience constructor: no inputs.
  template <typename Options>
  Declaration(std::string factory_name, Options options, std::string label)
      : Declaration{std::move(factory_name), {}, std::move(options), std::move(label)} {}

  /// \brief Convenience factory for the common case of a simple sequence of nodes.
  ///
  /// Each of decls will be appended to the inputs of the subsequent declaration,
  /// and the final modified declaration will be returned.
  ///
  /// Without this convenience factory, constructing a sequence would require explicit,
  /// difficult-to-read nesting:
  ///
  ///     Declaration{"n3",
  ///       {
  ///           Declaration{"n2",
  ///              {
  ///                  Declaration{"n1",
  ///                     {
  ///                         Declaration{"n0", N0Opts{}},
  ///                     },
  ///                     N1Opts{}},
  ///              },
  ///              N2Opts{}},
  ///       },
  ///       N3Opts{}};
  ///
  /// An equivalent Declaration can be constructed more tersely using Sequence:
  ///
  ///     Declaration::Sequence({
  ///         {"n0", N0Opts{}},
  ///         {"n1", N1Opts{}},
  ///         {"n2", N2Opts{}},
  ///         {"n3", N3Opts{}},
  ///     });
  static Declaration Sequence(std::vector<Declaration> decls);

  /// \brief Construct this node (and, recursively, its inputs) and add it to `plan`
  Result<ExecNode*> AddToPlan(ExecPlan* plan, ExecFactoryRegistry* registry =
                                                  default_exec_factory_registry()) const;

  // Validate a declaration
  bool IsValid(ExecFactoryRegistry* registry = default_exec_factory_registry()) const;

  /// Name of the node factory (looked up in an ExecFactoryRegistry).
  std::string factory_name;
  /// Inputs to the node; see Input.
  std::vector<Input> inputs;
  /// Options passed to the node factory.
  std::shared_ptr<ExecNodeOptions> options;
  /// Optional display label; see ExecNode::label().
  std::string label;
};
|
||||
|
||||
/// \brief Utility method to run a declaration and collect the results into a table
///
/// \param declaration A declaration describing the plan to run
/// \param use_threads If `use_threads` is false then all CPU work will be done on the
///                    calling thread.  I/O tasks will still happen on the I/O executor
///                    and may be multi-threaded (but should not use significant CPU
///                    resources).
/// \param memory_pool The memory pool to use for allocations made while running the plan.
/// \param function_registry The function registry to use for function execution.  If null
///                          then the default function registry will be used.
///
/// This method will add a sink node to the declaration to collect results into a
/// table.  It will then create an ExecPlan from the declaration, start the exec plan,
/// block until the plan has finished, and return the created table.
ARROW_EXPORT Result<std::shared_ptr<Table>> DeclarationToTable(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Asynchronous version of \see DeclarationToTable
///
/// \param declaration A declaration describing the plan to run
/// \param use_threads The behavior of use_threads is slightly different than the
///                    synchronous version since we cannot run synchronously on the
///                    calling thread.  Instead, if use_threads=false then a new thread
///                    pool will be created with a single thread and this will be used for
///                    all compute work.
/// \param memory_pool The memory pool to use for allocations made while running the plan.
/// \param function_registry The function registry to use for function execution.  If null
///                          then the default function registry will be used.
ARROW_EXPORT Future<std::shared_ptr<Table>> DeclarationToTableAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToTableAsync accepting a custom exec context
///
/// The executor must be specified (cannot be null) and must be kept alive until the
/// returned future finishes.
ARROW_EXPORT Future<std::shared_ptr<Table>> DeclarationToTableAsync(
    Declaration declaration, ExecContext custom_exec_context);
|
||||
|
||||
/// \brief a collection of exec batches with a common schema
struct BatchesWithCommonSchema {
  /// The collected batches.
  std::vector<ExecBatch> batches;
  /// The schema shared by all batches in `batches`.
  std::shared_ptr<Schema> schema;
};
|
||||
|
||||
/// \brief Utility method to run a declaration and collect the results into ExecBatch
/// vector
///
/// \see DeclarationToTable for details on threading & execution
ARROW_EXPORT Result<BatchesWithCommonSchema> DeclarationToExecBatches(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Asynchronous version of \see DeclarationToExecBatches
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_EXPORT Future<BatchesWithCommonSchema> DeclarationToExecBatchesAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToExecBatchesAsync accepting a custom exec context
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_EXPORT Future<BatchesWithCommonSchema> DeclarationToExecBatchesAsync(
    Declaration declaration, ExecContext custom_exec_context);
|
||||
|
||||
/// \brief Utility method to run a declaration and collect the results into a vector
/// of RecordBatch
///
/// \see DeclarationToTable for details on threading & execution
ARROW_EXPORT Result<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatches(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Asynchronous version of \see DeclarationToBatches
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_EXPORT Future<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatchesAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToBatchesAsync accepting a custom exec context
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_EXPORT Future<std::vector<std::shared_ptr<RecordBatch>>> DeclarationToBatchesAsync(
    Declaration declaration, ExecContext exec_context);
|
||||
|
||||
/// \brief Utility method to run a declaration and return results as a RecordBatchReader
///
/// If an exec context is not provided then a default exec context will be used based
/// on the value of `use_threads`.  If `use_threads` is false then the CPU executor will
/// be a serial executor and all CPU work will be done on the calling thread.  I/O tasks
/// will still happen on the I/O executor and may be multi-threaded.
///
/// If `use_threads` is false then all CPU work will happen during the calls to
/// RecordBatchReader::Next and no CPU work will happen in the background.  If
/// `use_threads` is true then CPU work will happen on the CPU thread pool and tasks may
/// run in between calls to RecordBatchReader::Next.  If the returned reader is not
/// consumed quickly enough then the plan will eventually pause as the backpressure queue
/// fills up.
///
/// If a custom exec context is provided then the value of `use_threads` will be ignored.
ARROW_EXPORT Result<std::unique_ptr<RecordBatchReader>> DeclarationToReader(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToReader accepting a custom exec context
ARROW_EXPORT Result<std::unique_ptr<RecordBatchReader>> DeclarationToReader(
    Declaration declaration, ExecContext exec_context);
|
||||
|
||||
/// \brief Utility method to run a declaration and ignore results
///
/// This can be useful when the data are consumed as part of the plan itself, for
/// example, when the plan ends with a write node.
///
/// \see DeclarationToTable for details on threading & execution
ARROW_EXPORT Status DeclarationToStatus(Declaration declaration, bool use_threads = true,
                                        MemoryPool* memory_pool = default_memory_pool(),
                                        FunctionRegistry* function_registry = NULLPTR);

/// \brief Asynchronous version of \see DeclarationToStatus
///
/// This can be useful when the data are consumed as part of the plan itself, for
/// example, when the plan ends with a write node.
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_EXPORT Future<> DeclarationToStatusAsync(
    Declaration declaration, bool use_threads = true,
    MemoryPool* memory_pool = default_memory_pool(),
    FunctionRegistry* function_registry = NULLPTR);

/// \brief Overload of \see DeclarationToStatusAsync accepting a custom exec context
///
/// \see DeclarationToTableAsync for details on threading & execution
ARROW_EXPORT Future<> DeclarationToStatusAsync(Declaration declaration,
                                               ExecContext exec_context);
|
||||
|
||||
/// \brief Wrap an ExecBatch generator in a RecordBatchReader.
///
/// The RecordBatchReader does not impose any ordering on emitted batches.
ARROW_EXPORT
std::shared_ptr<RecordBatchReader> MakeGeneratorReader(
    std::shared_ptr<Schema>, std::function<Future<std::optional<ExecBatch>>()>,
    MemoryPool*);

// Default queue sizing for MakeReaderGenerator (its max_q / q_restart parameters).
constexpr int kDefaultBackgroundMaxQ = 32;
constexpr int kDefaultBackgroundQRestart = 16;

/// \brief Make a generator of RecordBatchReaders
///
/// Useful as a source node for an Exec plan
///
/// \param reader the reader to drain
/// \param io_executor the executor on which reader I/O runs
/// \param max_q,q_restart queue sizing knobs; see the defaults above
ARROW_EXPORT
Result<std::function<Future<std::optional<ExecBatch>>()>> MakeReaderGenerator(
    std::shared_ptr<RecordBatchReader> reader, arrow::internal::Executor* io_executor,
    int max_q = kDefaultBackgroundMaxQ, int q_restart = kDefaultBackgroundQRestart);
|
||||
|
||||
/// @}
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,291 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// This API is EXPERIMENTAL.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <variant>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/type_fwd.h"
|
||||
#include "arrow/datum.h"
|
||||
#include "arrow/type_fwd.h"
|
||||
#include "arrow/util/small_vector.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
/// \defgroup expression-core Expressions to describe transformations in execution plans
///
/// @{

/// An unbound expression which maps a single Datum to another Datum.
/// An expression is one of
/// - A literal Datum.
/// - A reference to a single (potentially nested) field of the input Datum.
/// - A call to a compute function, with arguments specified by other Expressions.
class ARROW_EXPORT Expression {
 public:
  /// A call to a named compute function, plus state resolved by Bind().
  struct Call {
    std::string function_name;
    std::vector<Expression> arguments;
    std::shared_ptr<FunctionOptions> options;
    // Cached hash value
    size_t hash;

    // post-Bind properties:
    std::shared_ptr<Function> function;
    const Kernel* kernel = NULLPTR;
    std::shared_ptr<KernelState> kernel_state;
    TypeHolder type;

    // Recompute the cached `hash` member.
    void ComputeHash();
  };

  std::string ToString() const;
  bool Equals(const Expression& other) const;
  size_t hash() const;
  /// Hash functor, usable with unordered containers keyed on Expression.
  struct Hash {
    size_t operator()(const Expression& expr) const { return expr.hash(); }
  };

  /// Bind this expression to the given input type, looking up Kernels and field types.
  /// Some expression simplification may be performed and implicit casts will be inserted.
  /// Any state necessary for execution will be initialized and returned.
  Result<Expression> Bind(const TypeHolder& in, ExecContext* = NULLPTR) const;
  Result<Expression> Bind(const Schema& in_schema, ExecContext* = NULLPTR) const;

  // XXX someday
  // Clone all KernelState in this bound expression. If any function referenced by this
  // expression has mutable KernelState, it is not safe to execute or apply simplification
  // passes to it (or copies of it!) from multiple threads. Cloning state produces new
  // KernelStates where necessary to ensure that Expressions may be manipulated safely
  // on multiple threads.
  // Result<ExpressionState> CloneState() const;
  // Status SetState(ExpressionState);

  /// Return true if all an expression's field references have explicit types
  /// and all of its functions' kernels are looked up.
  bool IsBound() const;

  /// Return true if this expression is composed only of Scalar literals, field
  /// references, and calls to ScalarFunctions.
  bool IsScalarExpression() const;

  /// Return true if this expression is literal and entirely null.
  bool IsNullLiteral() const;

  /// Return true if this expression could evaluate to true. Will return true for any
  /// unbound, non-boolean, or unsimplified Expressions
  bool IsSatisfiable() const;

  // XXX someday
  // Result<PipelineGraph> GetPipelines();

  /// Return true if this expression holds a value (default-constructed
  /// Expressions do not: their impl_ is null).
  bool is_valid() const { return impl_ != NULLPTR; }

  /// Access a Call or return nullptr if this expression is not a call
  const Call* call() const;
  /// Access a Datum or return nullptr if this expression is not a literal
  const Datum* literal() const;
  /// Access a FieldRef or return nullptr if this expression is not a field_ref
  const FieldRef* field_ref() const;

  /// The type to which this expression will evaluate
  const DataType* type() const;
  // XXX someday
  // NullGeneralization::type nullable() const;

  /// A field reference plus properties resolved by Bind().
  struct Parameter {
    FieldRef ref;

    // post-bind properties
    TypeHolder type;
    ::arrow::internal::SmallVector<int, 2> indices;
  };
  const Parameter* parameter() const;

  Expression() = default;
  explicit Expression(Call call);
  explicit Expression(Datum literal);
  explicit Expression(Parameter parameter);

 private:
  // One of: literal (Datum), field reference (Parameter), or function call (Call).
  using Impl = std::variant<Datum, Parameter, Call>;
  std::shared_ptr<Impl> impl_;

  ARROW_FRIEND_EXPORT friend bool Identical(const Expression& l, const Expression& r);
};
|
||||
|
||||
inline bool operator==(const Expression& l, const Expression& r) { return l.Equals(r); }
|
||||
inline bool operator!=(const Expression& l, const Expression& r) { return !l.Equals(r); }
|
||||
|
||||
ARROW_EXPORT void PrintTo(const Expression&, std::ostream*);
|
||||
|
||||
// Factories
|
||||
|
||||
ARROW_EXPORT
|
||||
Expression literal(Datum lit);
|
||||
|
||||
template <typename Arg>
|
||||
Expression literal(Arg&& arg) {
|
||||
return literal(Datum(std::forward<Arg>(arg)));
|
||||
}
|
||||
|
||||
/// \brief Create an Expression referencing a (possibly nested) field.
ARROW_EXPORT
Expression field_ref(FieldRef ref);

/// \brief Create an Expression representing a call to a named compute function.
ARROW_EXPORT
Expression call(std::string function, std::vector<Expression> arguments,
                std::shared_ptr<FunctionOptions> options = NULLPTR);
|
||||
|
||||
template <typename Options, typename = typename std::enable_if<
|
||||
std::is_base_of<FunctionOptions, Options>::value>::type>
|
||||
Expression call(std::string function, std::vector<Expression> arguments,
|
||||
Options options) {
|
||||
return call(std::move(function), std::move(arguments),
|
||||
std::make_shared<Options>(std::move(options)));
|
||||
}
|
||||
|
||||
/// Assemble a list of all fields referenced by an Expression at any depth.
ARROW_EXPORT
std::vector<FieldRef> FieldsInExpression(const Expression&);

/// Check if the expression references any fields.
ARROW_EXPORT
bool ExpressionHasFieldRefs(const Expression&);

/// Mapping from field references to known values (defined elsewhere).
struct ARROW_EXPORT KnownFieldValues;

/// Assemble a mapping from field references to known values. This derives known values
/// from "equal" and "is_null" Expressions referencing a field and a literal.
ARROW_EXPORT
Result<KnownFieldValues> ExtractKnownFieldValues(
    const Expression& guaranteed_true_predicate);
|
||||
|
||||
/// @}

/// \defgroup expression-passes Functions for modification of Expressions
///
/// @{
///
/// These transform bound expressions. Some transforms utilize a guarantee, which is
/// provided as an Expression which is guaranteed to evaluate to true. The
/// guaranteed_true_predicate need not be bound, but canonicalization is currently
/// deferred to producers of guarantees. For example in order to be recognized as a
/// guarantee on a field value, an Expression must be a call to "equal" with field_ref LHS
/// and literal RHS. Flipping the arguments, "is_in" with a one-long value_set, ... or
/// other semantically identical Expressions will not be recognized.

/// Weak canonicalization which establishes guarantees for subsequent passes. Even
/// equivalent Expressions may result in different canonicalized expressions.
/// TODO this could be a strong canonicalization
ARROW_EXPORT
Result<Expression> Canonicalize(Expression, ExecContext* = NULLPTR);

/// Simplify Expressions based on literal arguments (for example, add(null, x) will always
/// be null so replace the call with a null literal). Includes early evaluation of all
/// calls whose arguments are entirely literal.
ARROW_EXPORT
Result<Expression> FoldConstants(Expression);

/// Simplify Expressions by replacing with known values of the fields which it references.
ARROW_EXPORT
Result<Expression> ReplaceFieldsWithKnownValues(const KnownFieldValues& known_values,
                                                Expression);

/// Simplify an expression by replacing subexpressions based on a guarantee:
/// a boolean expression which is guaranteed to evaluate to `true`. For example, this is
/// used to remove redundant function calls from a filter expression or to replace a
/// reference to a constant-value field with a literal.
ARROW_EXPORT
Result<Expression> SimplifyWithGuarantee(Expression,
                                         const Expression& guaranteed_true_predicate);

/// Replace all named field refs (e.g. "x" or "x.y") with field paths (e.g. [0] or [1,3])
///
/// This isn't usually needed and does not offer any simplification by itself. However,
/// it can be useful to normalize an expression to paths to make it simpler to work with.
ARROW_EXPORT Result<Expression> RemoveNamedRefs(Expression expression);

/// @}
||||
|
||||
// Execution
|
||||
|
||||
/// Create an ExecBatch suitable for passing to ExecuteScalarExpression() from a
|
||||
/// RecordBatch which may have missing or incorrectly ordered columns.
|
||||
/// Missing fields will be replaced with null scalars.
|
||||
ARROW_EXPORT Result<ExecBatch> MakeExecBatch(const Schema& full_schema,
|
||||
const Datum& partial,
|
||||
Expression guarantee = literal(true));
|
||||
|
||||
/// Execute a scalar expression against the provided state and input ExecBatch. This
|
||||
/// expression must be bound.
|
||||
ARROW_EXPORT
|
||||
Result<Datum> ExecuteScalarExpression(const Expression&, const ExecBatch& input,
|
||||
ExecContext* = NULLPTR);
|
||||
|
||||
/// Convenience function for invoking against a RecordBatch
|
||||
ARROW_EXPORT
|
||||
Result<Datum> ExecuteScalarExpression(const Expression&, const Schema& full_schema,
|
||||
const Datum& partial_input, ExecContext* = NULLPTR);
|
||||
|
||||
// Serialization
|
||||
|
||||
ARROW_EXPORT
|
||||
Result<std::shared_ptr<Buffer>> Serialize(const Expression&);
|
||||
|
||||
ARROW_EXPORT
|
||||
Result<Expression> Deserialize(std::shared_ptr<Buffer>);
|
||||
|
||||
/// \defgroup expression-convenience Functions convenient expression creation
|
||||
///
|
||||
/// @{
|
||||
|
||||
ARROW_EXPORT Expression project(std::vector<Expression> values,
|
||||
std::vector<std::string> names);
|
||||
|
||||
ARROW_EXPORT Expression equal(Expression lhs, Expression rhs);
|
||||
|
||||
ARROW_EXPORT Expression not_equal(Expression lhs, Expression rhs);
|
||||
|
||||
ARROW_EXPORT Expression less(Expression lhs, Expression rhs);
|
||||
|
||||
ARROW_EXPORT Expression less_equal(Expression lhs, Expression rhs);
|
||||
|
||||
ARROW_EXPORT Expression greater(Expression lhs, Expression rhs);
|
||||
|
||||
ARROW_EXPORT Expression greater_equal(Expression lhs, Expression rhs);
|
||||
|
||||
ARROW_EXPORT Expression is_null(Expression lhs, bool nan_is_null = false);
|
||||
|
||||
ARROW_EXPORT Expression is_valid(Expression lhs);
|
||||
|
||||
ARROW_EXPORT Expression and_(Expression lhs, Expression rhs);
|
||||
ARROW_EXPORT Expression and_(const std::vector<Expression>&);
|
||||
ARROW_EXPORT Expression or_(Expression lhs, Expression rhs);
|
||||
ARROW_EXPORT Expression or_(const std::vector<Expression>&);
|
||||
ARROW_EXPORT Expression not_(Expression operand);
|
||||
|
||||
/// @}
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,75 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/exec/accumulation_queue.h"
|
||||
#include "arrow/compute/exec/bloom_filter.h"
|
||||
#include "arrow/compute/exec/options.h"
|
||||
#include "arrow/compute/exec/query_context.h"
|
||||
#include "arrow/compute/exec/schema_util.h"
|
||||
#include "arrow/compute/exec/task_util.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/status.h"
|
||||
#include "arrow/type.h"
|
||||
#include "arrow/util/tracing.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
using arrow::util::AccumulationQueue;
|
||||
|
||||
/// Abstract interface for a hash-join implementation.
///
/// The join is driven externally (by an exec node): the driver registers task
/// groups, feeds build-side batches via BuildHashTable(), probe-side batches via
/// ProbeSingleBatch(), and receives output through the callbacks given to Init().
/// Two concrete implementations are available via MakeBasic() and MakeSwiss().
class HashJoinImpl {
 public:
  // Emits one output batch; the int64_t argument's exact meaning (thread index
  // vs. batch index) is not visible here -- NOTE(review): confirm with callers.
  using OutputBatchCallback = std::function<void(int64_t, ExecBatch)>;
  // Invoked (with the calling thread index) once the build-side hash table is ready.
  using BuildFinishedCallback = std::function<Status(size_t)>;
  // Invoked once when the join has produced all of its output.
  using FinishedCallback = std::function<void(int64_t)>;
  // Registers a task group given (task body, on-finished continuation) and
  // returns the task group id used with StartTaskGroupCallback.
  using RegisterTaskGroupCallback = std::function<int(
      std::function<Status(size_t, int64_t)>, std::function<Status(size_t)>)>;
  // Starts a previously registered task group (group id, number of tasks).
  using StartTaskGroupCallback = std::function<Status(int, int64_t)>;
  using AbortContinuationImpl = std::function<void()>;

  virtual ~HashJoinImpl() = default;
  // One-time setup of join type, schemas/projections, key comparison rules,
  // residual filter and the callbacks used to schedule work and emit output.
  virtual Status Init(QueryContext* ctx, JoinType join_type, size_t num_threads,
                      const HashJoinProjectionMaps* proj_map_left,
                      const HashJoinProjectionMaps* proj_map_right,
                      std::vector<JoinKeyCmp> key_cmp, Expression filter,
                      RegisterTaskGroupCallback register_task_group_callback,
                      StartTaskGroupCallback start_task_group_callback,
                      OutputBatchCallback output_batch_callback,
                      FinishedCallback finished_callback) = 0;

  // Consume all accumulated build-side batches and construct the hash table;
  // on_finished is invoked when construction completes.
  virtual Status BuildHashTable(size_t thread_index, AccumulationQueue batches,
                                BuildFinishedCallback on_finished) = 0;
  // Probe the hash table with a single probe-side batch.
  virtual Status ProbeSingleBatch(size_t thread_index, ExecBatch batch) = 0;
  // Signal that no further probe batches will arrive (enables e.g. emitting
  // unmatched build-side rows for outer joins).
  virtual Status ProbingFinished(size_t thread_index) = 0;
  // Cancel the join; pos_abort_callback runs once the abort has taken effect.
  // NOTE(review): parameter uses TaskScheduler::AbortContinuationImpl rather than
  // the alias declared above -- presumably the same type; confirm.
  virtual void Abort(TaskScheduler::AbortContinuationImpl pos_abort_callback) = 0;
  virtual std::string ToString() const = 0;

  // Factory functions for the two concrete join implementations.
  static Result<std::unique_ptr<HashJoinImpl>> MakeBasic();
  static Result<std::unique_ptr<HashJoinImpl>> MakeSwiss();

 protected:
  // Tracing span covering this join's lifetime.
  util::tracing::Span span_;
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,315 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "arrow/compute/exec.h"
|
||||
#include "arrow/compute/exec/schema_util.h"
|
||||
#include "arrow/compute/kernels/row_encoder.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/status.h"
|
||||
#include "arrow/type.h"
|
||||
|
||||
// This file contains hash join logic related to handling of dictionary encoded key
|
||||
// columns.
|
||||
//
|
||||
// A key column from probe side of the join can be matched against a key column from build
|
||||
// side of the join, as long as the underlying value types are equal. That means that:
|
||||
// - both scalars and arrays can be used and even mixed in the same column
|
||||
// - dictionary column can be matched against non-dictionary column if underlying value
|
||||
// types are equal
|
||||
// - dictionary column can be matched against dictionary column with a different index
|
||||
// type, and potentially using a different dictionary, if underlying value types are equal
|
||||
//
|
||||
// We currently require in hash join that for all dictionary encoded columns, the same
|
||||
// dictionary is used in all input exec batches.
|
||||
//
|
||||
// In order to allow matching columns with different dictionaries, different dictionary
|
||||
// index types, and dictionary key against non-dictionary key, internally comparisons will
|
||||
// be evaluated after remapping values on both sides of the join to a common
|
||||
// representation (which will be called "unified representation"). This common
|
||||
// representation is a column of int32() type (not a dictionary column). It represents an
|
||||
// index in the unified dictionary computed for the (only) dictionary present on build
|
||||
// side (an empty dictionary is still created for an empty build side). Null value is
|
||||
// always represented in this common representation as null int32 value, unified
|
||||
// dictionary will never contain a null value (so there is no ambiguity of representing
|
||||
// nulls as either index to a null entry in the dictionary or null index).
|
||||
//
|
||||
// Unified dictionary represents values present on build side. There may be values on
|
||||
// probe side that are not present in it. All such values, that are not null, are mapped
|
||||
// in the common representation to a special constant kMissingValueId.
|
||||
//
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
using internal::RowEncoder;
|
||||
|
||||
/// Helper class with operations that are stateless and common to processing of dictionary
|
||||
/// keys on both build and probe side.
|
||||
/// Helper class with operations that are stateless and common to processing of dictionary
/// keys on both build and probe side.
class HashJoinDictUtil {
 public:
  // Null values in unified representation are always represented as null that has
  // corresponding integer set to this constant
  static constexpr int32_t kNullId = 0;
  // Constant representing a value, that is not null, missing on the build side, in
  // unified representation.
  static constexpr int32_t kMissingValueId = -1;

  // Check if data types of corresponding pair of key column on build and probe side are
  // compatible
  static bool KeyDataTypesValid(const std::shared_ptr<DataType>& probe_data_type,
                                const std::shared_ptr<DataType>& build_data_type);

  // Input must be dictionary array or dictionary scalar.
  // A precomputed and provided here lookup table in the form of int32() array will be
  // used to remap input indices to unified representation.
  //
  // Returns the remapped indices as an int32() ArrayData of the given data_type.
  static Result<std::shared_ptr<ArrayData>> IndexRemapUsingLUT(
      ExecContext* ctx, const Datum& indices, int64_t batch_length,
      const std::shared_ptr<ArrayData>& map_array,
      const std::shared_ptr<DataType>& data_type);

  // Return int32() array that contains indices of input dictionary array or scalar after
  // type casting.
  static Result<std::shared_ptr<ArrayData>> ConvertToInt32(
      const std::shared_ptr<DataType>& from_type, const Datum& input,
      int64_t batch_length, ExecContext* ctx);

  // Return an array that contains elements of input int32() array after casting to a
  // given integer type. This is used for mapping unified representation stored in the
  // hash table on build side back to original input data type of hash join, when
  // outputting hash join results to parent exec node.
  //
  static Result<std::shared_ptr<ArrayData>> ConvertFromInt32(
      const std::shared_ptr<DataType>& to_type, const Datum& input, int64_t batch_length,
      ExecContext* ctx);

  // Return dictionary referenced in either dictionary array or dictionary scalar
  static std::shared_ptr<Array> ExtractDictionary(const Datum& data);
};
|
||||
|
||||
/// Implements processing of dictionary arrays/scalars in key columns on the build side of
|
||||
/// a hash join.
|
||||
/// Each instance of this class corresponds to a single column and stores and
|
||||
/// processes only the information related to that column.
|
||||
/// Const methods are thread-safe, non-const methods are not (the caller must make sure
|
||||
/// that only one thread at any time will access them).
|
||||
///
|
||||
/// Implements processing of dictionary arrays/scalars in key columns on the build side of
/// a hash join.
/// Each instance of this class corresponds to a single column and stores and
/// processes only the information related to that column.
/// Const methods are thread-safe, non-const methods are not (the caller must make sure
/// that only one thread at any time will access them).
///
class HashJoinDictBuild {
 public:
  // Returns true if the key column (described in input by its data type) requires any
  // pre- or post-processing related to handling dictionaries.
  //
  static bool KeyNeedsProcessing(const std::shared_ptr<DataType>& build_data_type) {
    return (build_data_type->id() == Type::DICTIONARY);
  }

  // Data type of unified representation
  static std::shared_ptr<DataType> DataTypeAfterRemapping() { return int32(); }

  // Should be called only once in hash join, before processing any build or probe
  // batches.
  //
  // Takes a pointer to the dictionary for a corresponding key column on the build side as
  // an input. If the build side is empty, it still needs to be called, but with
  // dictionary pointer set to null.
  //
  // Currently it is required that all input batches on build side share the same
  // dictionary. For each input batch during its pre-processing, dictionary will be
  // checked and error will be returned if it is different then the one provided in the
  // call to this method.
  //
  // Unifies the dictionary. The order of the values is still preserved.
  // Null and duplicate entries are removed. If the dictionary is already unified, its
  // copy will be produced and stored within this class.
  //
  // Prepares the mapping from ids within original dictionary to the ids in the resulting
  // dictionary. This is used later on to pre-process (map to unified representation) key
  // column on build side.
  //
  // Prepares the reverse mapping (in the form of hash table) from values to the ids in
  // the resulting dictionary. This will be used later on to pre-process (map to unified
  // representation) key column on probe side. Values on probe side that are not present
  // in the original dictionary will be mapped to a special constant kMissingValueId. The
  // exception is made for nulls, which get always mapped to nulls (both when null is
  // represented as a dictionary id pointing to a null and a null dictionary id).
  //
  Status Init(ExecContext* ctx, std::shared_ptr<Array> dictionary,
              std::shared_ptr<DataType> index_type, std::shared_ptr<DataType> value_type);

  // Remap array or scalar values into unified representation (array of int32()).
  // Outputs kMissingValueId if input value is not found in the unified dictionary.
  // Outputs null for null input value (with corresponding data set to kNullId).
  //
  Result<std::shared_ptr<ArrayData>> RemapInputValues(ExecContext* ctx,
                                                      const Datum& values,
                                                      int64_t batch_length) const;

  // Remap dictionary array or dictionary scalar on build side to unified representation.
  // Dictionary referenced in the input must match the dictionary that was
  // given during initialization.
  // The output is a dictionary array that references unified dictionary.
  //
  Result<std::shared_ptr<ArrayData>> RemapInput(
      ExecContext* ctx, const Datum& indices, int64_t batch_length,
      const std::shared_ptr<DataType>& data_type) const;

  // Outputs dictionary array referencing unified dictionary, given an array with 32-bit
  // ids.
  // Used to post-process values looked up in a hash table on build side of the hash join
  // before outputting to the parent exec node.
  //
  Result<std::shared_ptr<ArrayData>> RemapOutput(const ArrayData& indices32Bit,
                                                 ExecContext* ctx) const;

  // Release shared pointers and memory
  void CleanUp();

 private:
  // Data type of dictionary ids for the input dictionary on build side
  std::shared_ptr<DataType> index_type_;
  // Data type of values for the input dictionary on build side
  std::shared_ptr<DataType> value_type_;
  // Mapping from (encoded as string) values to the ids in unified dictionary
  std::unordered_map<std::string, int32_t> hash_table_;
  // Mapping from input dictionary ids to unified dictionary ids
  std::shared_ptr<ArrayData> remapped_ids_;
  // Input dictionary
  std::shared_ptr<Array> dictionary_;
  // Unified dictionary
  std::shared_ptr<ArrayData> unified_dictionary_;
};
|
||||
|
||||
/// Implements processing of dictionary arrays/scalars in key columns on the probe side of
|
||||
/// a hash join.
|
||||
/// Each instance of this class corresponds to a single column and stores and
|
||||
/// processes only the information related to that column.
|
||||
/// It is not thread-safe - every participating thread should use its own instance of
|
||||
/// this class.
|
||||
///
|
||||
/// Implements processing of dictionary arrays/scalars in key columns on the probe side of
/// a hash join.
/// Each instance of this class corresponds to a single column and stores and
/// processes only the information related to that column.
/// It is not thread-safe - every participating thread should use its own instance of
/// this class.
///
class HashJoinDictProbe {
 public:
  // Returns true if this pair of build/probe key types requires dictionary-related
  // pre-processing before hash table lookups.
  static bool KeyNeedsProcessing(const std::shared_ptr<DataType>& probe_data_type,
                                 const std::shared_ptr<DataType>& build_data_type);

  // Data type of the result of remapping input key column.
  //
  // The result of remapping is what is used in hash join for matching keys on build and
  // probe side. The exact data types may be different, as described below, and therefore
  // a common representation is needed for simplifying comparisons of pairs of keys on
  // both sides.
  //
  // We support matching key that is of non-dictionary type with key that is of dictionary
  // type, as long as the underlying value types are equal. We support matching when both
  // keys are of dictionary type, regardless whether underlying dictionary index types are
  // the same or not.
  //
  static std::shared_ptr<DataType> DataTypeAfterRemapping(
      const std::shared_ptr<DataType>& build_data_type);

  // Should only be called if KeyNeedsProcessing method returns true for a pair of
  // corresponding key columns from build and probe side.
  // Converts values in order to match the common representation for
  // both build and probe side used in hash table comparison.
  // Supports arrays and scalars as input.
  // Argument opt_build_side should be null if dictionary key on probe side is matched
  // with non-dictionary key on build side.
  //
  Result<std::shared_ptr<ArrayData>> RemapInput(
      const HashJoinDictBuild* opt_build_side, const Datum& data, int64_t batch_length,
      const std::shared_ptr<DataType>& probe_data_type,
      const std::shared_ptr<DataType>& build_data_type, ExecContext* ctx);

  // Release cached state (dictionary, mappings, encoder buffers).
  void CleanUp();

 private:
  // May be null if probe side key is non-dictionary. Otherwise it is used to verify that
  // only a single dictionary is referenced in exec batch on probe side of hash join.
  std::shared_ptr<Array> dictionary_;
  // Mapping from dictionary on probe side of hash join (if it is used) to unified
  // representation.
  std::shared_ptr<ArrayData> remapped_ids_;
  // Encoder of key columns that uses unified representation instead of original data type
  // for key columns that need to use it (have dictionaries on either side of the join).
  internal::RowEncoder encoder_;
};
|
||||
|
||||
// Encapsulates dictionary handling logic for build side of hash join.
|
||||
//
|
||||
// Encapsulates dictionary handling logic for build side of hash join.
//
// Holds one HashJoinDictBuild per key column that needs dictionary remapping.
//
class HashJoinDictBuildMulti {
 public:
  // Initialize per-column dictionary unification from a representative (non-empty)
  // build-side batch; opt_non_empty_batch may be null when the build side is empty.
  Status Init(const SchemaProjectionMaps<HashJoinProjection>& proj_map,
              const ExecBatch* opt_non_empty_batch, ExecContext* ctx);
  // Configure the row encoder with the post-remapping data types of the key columns.
  static void InitEncoder(const SchemaProjectionMaps<HashJoinProjection>& proj_map,
                          RowEncoder* encoder, ExecContext* ctx);
  // Encode the key columns of a build-side batch (remapping dictionary columns to
  // unified representation first) into the given encoder.
  Status EncodeBatch(size_t thread_index,
                     const SchemaProjectionMaps<HashJoinProjection>& proj_map,
                     const ExecBatch& batch, RowEncoder* encoder, ExecContext* ctx) const;
  // Map decoded key columns from unified representation back to their original
  // (dictionary) data types before output.
  Status PostDecode(const SchemaProjectionMaps<HashJoinProjection>& proj_map,
                    ExecBatch* decoded_key_batch, ExecContext* ctx);
  // Access the per-column dictionary unification state for key column icol.
  const HashJoinDictBuild& get_dict_build(int icol) const { return remap_imp_[icol]; }

 private:
  // Whether each key column needs dictionary remapping
  std::vector<bool> needs_remap_;
  // Per-key-column dictionary unification state
  std::vector<HashJoinDictBuild> remap_imp_;
};
|
||||
|
||||
// Encapsulates dictionary handling logic for probe side of hash join
|
||||
//
|
||||
// Encapsulates dictionary handling logic for probe side of hash join
//
// Keeps independent state per participating thread (HashJoinDictProbe itself is not
// thread-safe).
//
class HashJoinDictProbeMulti {
 public:
  // Allocate one lazily-initialized local state per participating thread.
  void Init(size_t num_threads);
  // Returns true if any key column of this thread's probe batches needs remapping
  // (because dictionaries are used on either side) before hash table lookups.
  bool BatchRemapNeeded(size_t thread_index,
                        const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
                        const SchemaProjectionMaps<HashJoinProjection>& proj_map_build,
                        ExecContext* ctx);
  // Encode the key columns of a probe-side batch, remapping dictionary columns to
  // unified representation where needed. Outputs the encoder holding the encoded
  // rows and, optionally, the remapped key batch.
  Status EncodeBatch(size_t thread_index,
                     const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
                     const SchemaProjectionMaps<HashJoinProjection>& proj_map_build,
                     const HashJoinDictBuildMulti& dict_build, const ExecBatch& batch,
                     RowEncoder** out_encoder, ExecBatch* opt_out_key_batch,
                     ExecContext* ctx);

 private:
  // First-use initialization of the calling thread's local state.
  void InitLocalStateIfNeeded(
      size_t thread_index, const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
      const SchemaProjectionMaps<HashJoinProjection>& proj_map_build, ExecContext* ctx);
  // Configure the row encoder with post-remapping data types of the key columns.
  static void InitEncoder(const SchemaProjectionMaps<HashJoinProjection>& proj_map_probe,
                          const SchemaProjectionMaps<HashJoinProjection>& proj_map_build,
                          RowEncoder* encoder, ExecContext* ctx);
  // Per-thread probe-side dictionary-remapping state.
  struct ThreadLocalState {
    bool is_initialized;
    // Whether any key column needs remapping (because of dictionaries used) before doing
    // join hash table lookups
    bool any_needs_remap;
    // Whether each key column needs remapping before doing join hash table lookups
    std::vector<bool> needs_remap;
    std::vector<HashJoinDictProbe> remap_imp;
    // Encoder of key columns that uses unified representation instead of original data
    // type for key columns that need to use it (have dictionaries on either side of the
    // join).
    RowEncoder post_remap_encoder;
  };
  std::vector<ThreadLocalState> local_states_;
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,99 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/exec/options.h"
|
||||
#include "arrow/compute/exec/schema_util.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/status.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
/// Manages the schemas involved in a hash join: input schemas of both sides, key and
/// payload projections, the residual filter, and the output schema.
class ARROW_EXPORT HashJoinSchema {
 public:
  // Initialize with output columns defaulting from the input schemas.
  Status Init(JoinType join_type, const Schema& left_schema,
              const std::vector<FieldRef>& left_keys, const Schema& right_schema,
              const std::vector<FieldRef>& right_keys, const Expression& filter,
              const std::string& left_field_name_prefix,
              const std::string& right_field_name_prefix);

  // Initialize with explicitly selected output columns for each side.
  Status Init(JoinType join_type, const Schema& left_schema,
              const std::vector<FieldRef>& left_keys,
              const std::vector<FieldRef>& left_output, const Schema& right_schema,
              const std::vector<FieldRef>& right_keys,
              const std::vector<FieldRef>& right_output, const Expression& filter,
              const std::string& left_field_name_prefix,
              const std::string& right_field_name_prefix);

  // Validate that the key/output field references resolve and the join is well-formed.
  static Status ValidateSchemas(JoinType join_type, const Schema& left_schema,
                                const std::vector<FieldRef>& left_keys,
                                const std::vector<FieldRef>& left_output,
                                const Schema& right_schema,
                                const std::vector<FieldRef>& right_keys,
                                const std::vector<FieldRef>& right_output,
                                const std::string& left_field_name_prefix,
                                const std::string& right_field_name_prefix);

  // True if any key column on either side is dictionary-encoded.
  bool HasDictionaries() const;

  // True if any key column uses large binary/string types.
  bool HasLargeBinary() const;

  // Bind the residual filter expression against the combined filter schema.
  Result<Expression> BindFilter(Expression filter, const Schema& left_schema,
                                const Schema& right_schema, ExecContext* exec_context);
  // Build the join's output schema, disambiguating duplicate names with the
  // given per-side suffixes.
  std::shared_ptr<Schema> MakeOutputSchema(const std::string& left_field_name_suffix,
                                           const std::string& right_field_name_suffix);

  bool LeftPayloadIsEmpty() { return PayloadIsEmpty(0); }

  bool RightPayloadIsEmpty() { return PayloadIsEmpty(1); }

  // Sentinel for a field not present in a projection map.
  static int kMissingField() {
    return SchemaProjectionMaps<HashJoinProjection>::kMissingField;
  }

  // Projection maps for the two sides: [0] = left, [1] = right.
  SchemaProjectionMaps<HashJoinProjection> proj_maps[2];

 private:
  static bool IsTypeSupported(const DataType& type);

  // Collect the field references of each side used by the filter expression.
  Status CollectFilterColumns(std::vector<FieldRef>& left_filter,
                              std::vector<FieldRef>& right_filter,
                              const Expression& filter, const Schema& left_schema,
                              const Schema& right_schema);

  // Rewrite filter field references to indices in the internal filter schema.
  Expression RewriteFilterToUseFilterSchema(int right_filter_offset,
                                            const SchemaProjectionMap& left_to_filter,
                                            const SchemaProjectionMap& right_to_filter,
                                            const Expression& filter);

  // side: 0 = left, 1 = right.
  bool PayloadIsEmpty(int side) {
    ARROW_DCHECK(side == 0 || side == 1);
    return proj_maps[side].num_cols(HashJoinProjection::PAYLOAD) == 0;
  }

  // Compute the payload columns: output columns minus key columns, plus columns
  // referenced by the filter -- NOTE(review): exact set semantics not visible here;
  // confirm against the implementation.
  static Result<std::vector<FieldRef>> ComputePayload(const Schema& schema,
                                                      const std::vector<FieldRef>& output,
                                                      const std::vector<FieldRef>& filter,
                                                      const std::vector<FieldRef>& key);
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,223 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#if defined(ARROW_HAVE_AVX2)
|
||||
#include <immintrin.h>
|
||||
#endif
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
#include "arrow/compute/exec/util.h"
|
||||
#include "arrow/compute/light_array.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
// Forward declarations only needed for making test functions a friend of the classes in
|
||||
// this file.
|
||||
//
|
||||
enum class BloomFilterBuildStrategy;
|
||||
|
||||
// Implementations are based on xxh3 32-bit algorithm description from:
|
||||
// https://github.com/Cyan4973/xxHash/blob/dev/doc/xxhash_spec.md
|
||||
//
|
||||
class ARROW_EXPORT Hashing32 {
|
||||
friend class TestVectorHash;
|
||||
template <typename T>
|
||||
friend void TestBloomLargeHashHelper(int64_t, int64_t, const std::vector<uint64_t>&,
|
||||
int64_t, int, T*);
|
||||
friend void TestBloomSmall(BloomFilterBuildStrategy, int64_t, int, bool, bool);
|
||||
|
||||
public:
|
||||
static void HashMultiColumn(const std::vector<KeyColumnArray>& cols, LightContext* ctx,
|
||||
uint32_t* out_hash);
|
||||
|
||||
static Status HashBatch(const ExecBatch& key_batch, uint32_t* hashes,
|
||||
std::vector<KeyColumnArray>& column_arrays,
|
||||
int64_t hardware_flags, util::TempVectorStack* temp_stack,
|
||||
int64_t offset, int64_t length);
|
||||
|
||||
private:
|
||||
static const uint32_t PRIME32_1 = 0x9E3779B1;
|
||||
static const uint32_t PRIME32_2 = 0x85EBCA77;
|
||||
static const uint32_t PRIME32_3 = 0xC2B2AE3D;
|
||||
static const uint32_t PRIME32_4 = 0x27D4EB2F;
|
||||
static const uint32_t PRIME32_5 = 0x165667B1;
|
||||
static const uint32_t kCombineConst = 0x9e3779b9UL;
|
||||
static const int64_t kStripeSize = 4 * sizeof(uint32_t);
|
||||
|
||||
static void HashFixed(int64_t hardware_flags, bool combine_hashes, uint32_t num_keys,
|
||||
uint64_t length_key, const uint8_t* keys, uint32_t* hashes,
|
||||
uint32_t* temp_hashes_for_combine);
|
||||
|
||||
static void HashVarLen(int64_t hardware_flags, bool combine_hashes, uint32_t num_rows,
|
||||
const uint32_t* offsets, const uint8_t* concatenated_keys,
|
||||
uint32_t* hashes, uint32_t* temp_hashes_for_combine);
|
||||
|
||||
static void HashVarLen(int64_t hardware_flags, bool combine_hashes, uint32_t num_rows,
|
||||
const uint64_t* offsets, const uint8_t* concatenated_keys,
|
||||
uint32_t* hashes, uint32_t* temp_hashes_for_combine);
|
||||
|
||||
static inline uint32_t Avalanche(uint32_t acc) {
|
||||
acc ^= (acc >> 15);
|
||||
acc *= PRIME32_2;
|
||||
acc ^= (acc >> 13);
|
||||
acc *= PRIME32_3;
|
||||
acc ^= (acc >> 16);
|
||||
return acc;
|
||||
}
|
||||
static inline uint32_t Round(uint32_t acc, uint32_t input);
|
||||
static inline uint32_t CombineAccumulators(uint32_t acc1, uint32_t acc2, uint32_t acc3,
|
||||
uint32_t acc4);
|
||||
static inline uint32_t CombineHashesImp(uint32_t previous_hash, uint32_t hash) {
|
||||
uint32_t next_hash = previous_hash ^ (hash + kCombineConst + (previous_hash << 6) +
|
||||
(previous_hash >> 2));
|
||||
return next_hash;
|
||||
}
|
||||
static inline void ProcessFullStripes(uint64_t num_stripes, const uint8_t* key,
|
||||
uint32_t* out_acc1, uint32_t* out_acc2,
|
||||
uint32_t* out_acc3, uint32_t* out_acc4);
|
||||
static inline void ProcessLastStripe(uint32_t mask1, uint32_t mask2, uint32_t mask3,
|
||||
uint32_t mask4, const uint8_t* last_stripe,
|
||||
uint32_t* acc1, uint32_t* acc2, uint32_t* acc3,
|
||||
uint32_t* acc4);
|
||||
static inline void StripeMask(int i, uint32_t* mask1, uint32_t* mask2, uint32_t* mask3,
|
||||
uint32_t* mask4);
|
||||
template <bool T_COMBINE_HASHES>
|
||||
static void HashFixedLenImp(uint32_t num_rows, uint64_t length, const uint8_t* keys,
|
||||
uint32_t* hashes);
|
||||
template <typename T, bool T_COMBINE_HASHES>
|
||||
static void HashVarLenImp(uint32_t num_rows, const T* offsets,
|
||||
const uint8_t* concatenated_keys, uint32_t* hashes);
|
||||
template <bool T_COMBINE_HASHES>
|
||||
static void HashBitImp(int64_t bit_offset, uint32_t num_keys, const uint8_t* keys,
|
||||
uint32_t* hashes);
|
||||
static void HashBit(bool combine_hashes, int64_t bit_offset, uint32_t num_keys,
|
||||
const uint8_t* keys, uint32_t* hashes);
|
||||
template <bool T_COMBINE_HASHES, typename T>
|
||||
static void HashIntImp(uint32_t num_keys, const T* keys, uint32_t* hashes);
|
||||
static void HashInt(bool combine_hashes, uint32_t num_keys, uint64_t length_key,
|
||||
const uint8_t* keys, uint32_t* hashes);
|
||||
|
||||
#if defined(ARROW_HAVE_AVX2)
|
||||
static inline __m256i Avalanche_avx2(__m256i hash);
|
||||
static inline __m256i CombineHashesImp_avx2(__m256i previous_hash, __m256i hash);
|
||||
template <bool T_COMBINE_HASHES>
|
||||
static void AvalancheAll_avx2(uint32_t num_rows, uint32_t* hashes,
|
||||
const uint32_t* hashes_temp_for_combine);
|
||||
static inline __m256i Round_avx2(__m256i acc, __m256i input);
|
||||
static inline uint64_t CombineAccumulators_avx2(__m256i acc);
|
||||
static inline __m256i StripeMask_avx2(int i, int j);
|
||||
template <bool two_equal_lengths>
|
||||
static inline __m256i ProcessStripes_avx2(int64_t num_stripes_A, int64_t num_stripes_B,
|
||||
__m256i mask_last_stripe, const uint8_t* keys,
|
||||
int64_t offset_A, int64_t offset_B);
|
||||
template <bool T_COMBINE_HASHES>
|
||||
static uint32_t HashFixedLenImp_avx2(uint32_t num_rows, uint64_t length,
|
||||
const uint8_t* keys, uint32_t* hashes,
|
||||
uint32_t* hashes_temp_for_combine);
|
||||
static uint32_t HashFixedLen_avx2(bool combine_hashes, uint32_t num_rows,
|
||||
uint64_t length, const uint8_t* keys,
|
||||
uint32_t* hashes, uint32_t* hashes_temp_for_combine);
|
||||
template <typename T, bool T_COMBINE_HASHES>
|
||||
static uint32_t HashVarLenImp_avx2(uint32_t num_rows, const T* offsets,
|
||||
const uint8_t* concatenated_keys, uint32_t* hashes,
|
||||
uint32_t* hashes_temp_for_combine);
|
||||
static uint32_t HashVarLen_avx2(bool combine_hashes, uint32_t num_rows,
|
||||
const uint32_t* offsets,
|
||||
const uint8_t* concatenated_keys, uint32_t* hashes,
|
||||
uint32_t* hashes_temp_for_combine);
|
||||
static uint32_t HashVarLen_avx2(bool combine_hashes, uint32_t num_rows,
|
||||
const uint64_t* offsets,
|
||||
const uint8_t* concatenated_keys, uint32_t* hashes,
|
||||
uint32_t* hashes_temp_for_combine);
|
||||
#endif
|
||||
};
|
||||
|
||||
class ARROW_EXPORT Hashing64 {
|
||||
friend class TestVectorHash;
|
||||
template <typename T>
|
||||
friend void TestBloomLargeHashHelper(int64_t, int64_t, const std::vector<uint64_t>&,
|
||||
int64_t, int, T*);
|
||||
friend void TestBloomSmall(BloomFilterBuildStrategy, int64_t, int, bool, bool);
|
||||
|
||||
public:
|
||||
static void HashMultiColumn(const std::vector<KeyColumnArray>& cols, LightContext* ctx,
|
||||
uint64_t* hashes);
|
||||
|
||||
static Status HashBatch(const ExecBatch& key_batch, uint64_t* hashes,
|
||||
std::vector<KeyColumnArray>& column_arrays,
|
||||
int64_t hardware_flags, util::TempVectorStack* temp_stack,
|
||||
int64_t offset, int64_t length);
|
||||
|
||||
private:
|
||||
static const uint64_t PRIME64_1 = 0x9E3779B185EBCA87ULL;
|
||||
static const uint64_t PRIME64_2 = 0xC2B2AE3D27D4EB4FULL;
|
||||
static const uint64_t PRIME64_3 = 0x165667B19E3779F9ULL;
|
||||
static const uint64_t PRIME64_4 = 0x85EBCA77C2B2AE63ULL;
|
||||
static const uint64_t PRIME64_5 = 0x27D4EB2F165667C5ULL;
|
||||
static const uint32_t kCombineConst = 0x9e3779b9UL;
|
||||
static const int64_t kStripeSize = 4 * sizeof(uint64_t);
|
||||
|
||||
static void HashFixed(bool combine_hashes, uint32_t num_keys, uint64_t length_key,
|
||||
const uint8_t* keys, uint64_t* hashes);
|
||||
|
||||
static void HashVarLen(bool combine_hashes, uint32_t num_rows, const uint32_t* offsets,
|
||||
const uint8_t* concatenated_keys, uint64_t* hashes);
|
||||
|
||||
static void HashVarLen(bool combine_hashes, uint32_t num_rows, const uint64_t* offsets,
|
||||
const uint8_t* concatenated_keys, uint64_t* hashes);
|
||||
|
||||
static inline uint64_t Avalanche(uint64_t acc);
|
||||
static inline uint64_t Round(uint64_t acc, uint64_t input);
|
||||
static inline uint64_t CombineAccumulators(uint64_t acc1, uint64_t acc2, uint64_t acc3,
|
||||
uint64_t acc4);
|
||||
static inline uint64_t CombineHashesImp(uint64_t previous_hash, uint64_t hash) {
|
||||
uint64_t next_hash = previous_hash ^ (hash + kCombineConst + (previous_hash << 6) +
|
||||
(previous_hash >> 2));
|
||||
return next_hash;
|
||||
}
|
||||
static inline void ProcessFullStripes(uint64_t num_stripes, const uint8_t* key,
|
||||
uint64_t* out_acc1, uint64_t* out_acc2,
|
||||
uint64_t* out_acc3, uint64_t* out_acc4);
|
||||
static inline void ProcessLastStripe(uint64_t mask1, uint64_t mask2, uint64_t mask3,
|
||||
uint64_t mask4, const uint8_t* last_stripe,
|
||||
uint64_t* acc1, uint64_t* acc2, uint64_t* acc3,
|
||||
uint64_t* acc4);
|
||||
static inline void StripeMask(int i, uint64_t* mask1, uint64_t* mask2, uint64_t* mask3,
|
||||
uint64_t* mask4);
|
||||
template <bool T_COMBINE_HASHES>
|
||||
static void HashFixedLenImp(uint32_t num_rows, uint64_t length, const uint8_t* keys,
|
||||
uint64_t* hashes);
|
||||
template <typename T, bool T_COMBINE_HASHES>
|
||||
static void HashVarLenImp(uint32_t num_rows, const T* offsets,
|
||||
const uint8_t* concatenated_keys, uint64_t* hashes);
|
||||
template <bool T_COMBINE_HASHES>
|
||||
static void HashBitImp(int64_t bit_offset, uint32_t num_keys, const uint8_t* keys,
|
||||
uint64_t* hashes);
|
||||
static void HashBit(bool T_COMBINE_HASHES, int64_t bit_offset, uint32_t num_keys,
|
||||
const uint8_t* keys, uint64_t* hashes);
|
||||
template <bool T_COMBINE_HASHES, typename T>
|
||||
static void HashIntImp(uint32_t num_keys, const T* keys, uint64_t* hashes);
|
||||
static void HashInt(bool T_COMBINE_HASHES, uint32_t num_keys, uint64_t length_key,
|
||||
const uint8_t* keys, uint64_t* hashes);
|
||||
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,275 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <functional>
|
||||
|
||||
#include "arrow/compute/exec/util.h"
|
||||
#include "arrow/memory_pool.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/status.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
// SwissTable is a variant of a hash table implementation.
|
||||
// This implementation is vectorized, that is: main interface methods take arrays of input
|
||||
// values and output arrays of result values.
|
||||
//
|
||||
// A detailed explanation of this data structure (including concepts such as blocks,
|
||||
// slots, stamps) and operations provided by this class is given in the document:
|
||||
// arrow/compute/exec/doc/key_map.md.
|
||||
//
|
||||
class SwissTable {
|
||||
friend class SwissTableMerge;
|
||||
|
||||
public:
|
||||
SwissTable() = default;
|
||||
~SwissTable() { cleanup(); }
|
||||
|
||||
using EqualImpl =
|
||||
std::function<void(int num_keys, const uint16_t* selection /* may be null */,
|
||||
const uint32_t* group_ids, uint32_t* out_num_keys_mismatch,
|
||||
uint16_t* out_selection_mismatch, void* callback_ctx)>;
|
||||
using AppendImpl =
|
||||
std::function<Status(int num_keys, const uint16_t* selection, void* callback_ctx)>;
|
||||
|
||||
Status init(int64_t hardware_flags, MemoryPool* pool, int log_blocks = 0,
|
||||
bool no_hash_array = false);
|
||||
|
||||
void cleanup();
|
||||
|
||||
void early_filter(const int num_keys, const uint32_t* hashes,
|
||||
uint8_t* out_match_bitvector, uint8_t* out_local_slots) const;
|
||||
|
||||
void find(const int num_keys, const uint32_t* hashes, uint8_t* inout_match_bitvector,
|
||||
const uint8_t* local_slots, uint32_t* out_group_ids,
|
||||
util::TempVectorStack* temp_stack, const EqualImpl& equal_impl,
|
||||
void* callback_ctx) const;
|
||||
|
||||
Status map_new_keys(uint32_t num_ids, uint16_t* ids, const uint32_t* hashes,
|
||||
uint32_t* group_ids, util::TempVectorStack* temp_stack,
|
||||
const EqualImpl& equal_impl, const AppendImpl& append_impl,
|
||||
void* callback_ctx);
|
||||
|
||||
int minibatch_size() const { return 1 << log_minibatch_; }
|
||||
|
||||
int64_t num_inserted() const { return num_inserted_; }
|
||||
|
||||
int64_t hardware_flags() const { return hardware_flags_; }
|
||||
|
||||
MemoryPool* pool() const { return pool_; }
|
||||
|
||||
private:
|
||||
// Lookup helpers
|
||||
|
||||
/// \brief Scan bytes in block in reverse and stop as soon
|
||||
/// as a position of interest is found.
|
||||
///
|
||||
/// Positions of interest:
|
||||
/// a) slot with a matching stamp is encountered,
|
||||
/// b) first empty slot is encountered,
|
||||
/// c) we reach the end of the block.
|
||||
///
|
||||
/// Optionally an index of the first slot to start the search from can be specified.
|
||||
/// In this case slots before it will be ignored.
|
||||
///
|
||||
/// \param[in] block 8 byte block of hash table
|
||||
/// \param[in] stamp 7 bits of hash used as a stamp
|
||||
/// \param[in] start_slot Index of the first slot in the block to start search from. We
|
||||
/// assume that this index always points to a non-empty slot, equivalently
|
||||
/// that it comes before any empty slots. (Used only by one template
|
||||
/// variant.)
|
||||
/// \param[out] out_slot index corresponding to the discovered position of interest (8
|
||||
/// represents end of block).
|
||||
/// \param[out] out_match_found an integer flag (0 or 1) indicating if we reached an
|
||||
/// empty slot (0) or not (1). Therefore 1 can mean that either actual match was found
|
||||
/// (case a) above) or we reached the end of full block (case b) above).
|
||||
///
|
||||
template <bool use_start_slot>
|
||||
inline void search_block(uint64_t block, int stamp, int start_slot, int* out_slot,
|
||||
int* out_match_found) const;
|
||||
|
||||
/// \brief Extract group id for a given slot in a given block.
|
||||
///
|
||||
inline uint64_t extract_group_id(const uint8_t* block_ptr, int slot,
|
||||
uint64_t group_id_mask) const;
|
||||
void extract_group_ids(const int num_keys, const uint16_t* optional_selection,
|
||||
const uint32_t* hashes, const uint8_t* local_slots,
|
||||
uint32_t* out_group_ids) const;
|
||||
|
||||
template <typename T, bool use_selection>
|
||||
void extract_group_ids_imp(const int num_keys, const uint16_t* selection,
|
||||
const uint32_t* hashes, const uint8_t* local_slots,
|
||||
uint32_t* out_group_ids, int elements_offset,
|
||||
int element_mutltiplier) const;
|
||||
|
||||
inline uint64_t next_slot_to_visit(uint64_t block_index, int slot,
|
||||
int match_found) const;
|
||||
|
||||
inline uint64_t num_groups_for_resize() const;
|
||||
|
||||
inline uint64_t wrap_global_slot_id(uint64_t global_slot_id) const;
|
||||
|
||||
void init_slot_ids(const int num_keys, const uint16_t* selection,
|
||||
const uint32_t* hashes, const uint8_t* local_slots,
|
||||
const uint8_t* match_bitvector, uint32_t* out_slot_ids) const;
|
||||
|
||||
void init_slot_ids_for_new_keys(uint32_t num_ids, const uint16_t* ids,
|
||||
const uint32_t* hashes, uint32_t* slot_ids) const;
|
||||
|
||||
// Quickly filter out keys that have no matches based only on hash value and the
|
||||
// corresponding starting 64-bit block of slot status bytes. May return false positives.
|
||||
//
|
||||
void early_filter_imp(const int num_keys, const uint32_t* hashes,
|
||||
uint8_t* out_match_bitvector, uint8_t* out_local_slots) const;
|
||||
#if defined(ARROW_HAVE_AVX2)
|
||||
int early_filter_imp_avx2_x8(const int num_hashes, const uint32_t* hashes,
|
||||
uint8_t* out_match_bitvector,
|
||||
uint8_t* out_local_slots) const;
|
||||
int early_filter_imp_avx2_x32(const int num_hashes, const uint32_t* hashes,
|
||||
uint8_t* out_match_bitvector,
|
||||
uint8_t* out_local_slots) const;
|
||||
int extract_group_ids_avx2(const int num_keys, const uint32_t* hashes,
|
||||
const uint8_t* local_slots, uint32_t* out_group_ids,
|
||||
int byte_offset, int byte_multiplier, int byte_size) const;
|
||||
#endif
|
||||
|
||||
void run_comparisons(const int num_keys, const uint16_t* optional_selection_ids,
|
||||
const uint8_t* optional_selection_bitvector,
|
||||
const uint32_t* groupids, int* out_num_not_equal,
|
||||
uint16_t* out_not_equal_selection, const EqualImpl& equal_impl,
|
||||
void* callback_ctx) const;
|
||||
|
||||
inline bool find_next_stamp_match(const uint32_t hash, const uint32_t in_slot_id,
|
||||
uint32_t* out_slot_id, uint32_t* out_group_id) const;
|
||||
|
||||
inline void insert_into_empty_slot(uint32_t slot_id, uint32_t hash, uint32_t group_id);
|
||||
|
||||
// Slow processing of input keys in the most generic case.
|
||||
// Handles inserting new keys.
|
||||
// Pre-existing keys will be handled correctly, although the intended use is for this
|
||||
// call to follow a call to find() method, which would only pass on new keys that were
|
||||
// not present in the hash table.
|
||||
//
|
||||
Status map_new_keys_helper(const uint32_t* hashes, uint32_t* inout_num_selected,
|
||||
uint16_t* inout_selection, bool* out_need_resize,
|
||||
uint32_t* out_group_ids, uint32_t* out_next_slot_ids,
|
||||
util::TempVectorStack* temp_stack,
|
||||
const EqualImpl& equal_impl, const AppendImpl& append_impl,
|
||||
void* callback_ctx);
|
||||
|
||||
// Resize small hash tables when 50% full (up to 8KB).
|
||||
// Resize large hash tables when 75% full.
|
||||
Status grow_double();
|
||||
|
||||
static int num_groupid_bits_from_log_blocks(int log_blocks) {
|
||||
int required_bits = log_blocks + 3;
|
||||
return required_bits <= 8 ? 8
|
||||
: required_bits <= 16 ? 16
|
||||
: required_bits <= 32 ? 32
|
||||
: 64;
|
||||
}
|
||||
|
||||
// Use 32-bit hash for now
|
||||
static constexpr int bits_hash_ = 32;
|
||||
|
||||
// Number of hash bits stored in slots in a block.
|
||||
// The highest bits of hash determine block id.
|
||||
// The next set of highest bits is a "stamp" stored in a slot in a block.
|
||||
static constexpr int bits_stamp_ = 7;
|
||||
|
||||
// Padding bytes added at the end of buffers for ease of SIMD access
|
||||
static constexpr int padding_ = 64;
|
||||
|
||||
int log_minibatch_;
|
||||
// Base 2 log of the number of blocks
|
||||
int log_blocks_ = 0;
|
||||
// Number of keys inserted into hash table
|
||||
uint32_t num_inserted_ = 0;
|
||||
|
||||
// Data for blocks.
|
||||
// Each block has 8 status bytes for 8 slots, followed by 8 bit packed group ids for
|
||||
// these slots. In 8B status word, the order of bytes is reversed. Group ids are in
|
||||
// normal order. There is 64B padding at the end.
|
||||
//
|
||||
// 0 byte - 7 bucket | 1. byte - 6 bucket | ...
|
||||
// ---------------------------------------------------
|
||||
// | Empty bit* | Empty bit |
|
||||
// ---------------------------------------------------
|
||||
// | 7-bit hash | 7-bit hash |
|
||||
// ---------------------------------------------------
|
||||
// * Empty bucket has value 0x80. Non-empty bucket has highest bit set to 0.
|
||||
//
|
||||
uint8_t* blocks_;
|
||||
|
||||
// Array of hashes of values inserted into slots.
|
||||
// Undefined if the corresponding slot is empty.
|
||||
// There is 64B padding at the end.
|
||||
uint32_t* hashes_;
|
||||
|
||||
int64_t hardware_flags_;
|
||||
MemoryPool* pool_;
|
||||
};
|
||||
|
||||
uint64_t SwissTable::extract_group_id(const uint8_t* block_ptr, int slot,
|
||||
uint64_t group_id_mask) const {
|
||||
// Group id values for all 8 slots in the block are bit-packed and follow the status
|
||||
// bytes. We assume here that the number of bits is rounded up to 8, 16, 32 or 64. In
|
||||
// that case we can extract group id using aligned 64-bit word access.
|
||||
int num_group_id_bits = static_cast<int>(ARROW_POPCOUNT64(group_id_mask));
|
||||
ARROW_DCHECK(num_group_id_bits == 8 || num_group_id_bits == 16 ||
|
||||
num_group_id_bits == 32 || num_group_id_bits == 64);
|
||||
|
||||
int bit_offset = slot * num_group_id_bits;
|
||||
const uint64_t* group_id_bytes =
|
||||
reinterpret_cast<const uint64_t*>(block_ptr) + 1 + (bit_offset >> 6);
|
||||
uint64_t group_id = (*group_id_bytes >> (bit_offset & 63)) & group_id_mask;
|
||||
|
||||
return group_id;
|
||||
}
|
||||
|
||||
// Store a new key's stamp and group id at `slot_id`. Assumes the slot is
// empty and its group id field is still zero (the value is OR-ed in rather
// than assigned) — TODO confirm callers guarantee this.
void SwissTable::insert_into_empty_slot(uint32_t slot_id, uint32_t hash,
                                        uint32_t group_id) {
  const uint64_t num_groupid_bits = num_groupid_bits_from_log_blocks(log_blocks_);

  // We assume here that the number of bits is rounded up to 8, 16, 32 or 64.
  // In that case we can insert group id value using aligned 64-bit word access.
  ARROW_DCHECK(num_groupid_bits == 8 || num_groupid_bits == 16 ||
               num_groupid_bits == 32 || num_groupid_bits == 64);

  // A block is 8 status bytes plus 8 bit-packed group ids
  // (8 * num_groupid_bits bits = num_groupid_bits bytes).
  const uint64_t num_block_bytes = (8 + num_groupid_bits);
  constexpr uint64_t stamp_mask = 0x7f;

  // Slot index within its block (8 slots per block).
  int start_slot = (slot_id & 7);
  // The stamp is the 7 hash bits immediately below the block-id bits.
  int stamp =
      static_cast<int>((hash >> (bits_hash_ - log_blocks_ - bits_stamp_)) & stamp_mask);
  uint64_t block_id = slot_id >> 3;
  uint8_t* blockbase = blocks_ + num_block_bytes * block_id;

  // Status bytes are stored in reverse order within the 8-byte status word.
  blockbase[7 - start_slot] = static_cast<uint8_t>(stamp);
  int groupid_bit_offset = static_cast<int>(start_slot * num_groupid_bits);

  // Block status bytes should start at an address aligned to 8 bytes
  ARROW_DCHECK((reinterpret_cast<uint64_t>(blockbase) & 7) == 0);
  // Skip the status word (+1), then OR the group id into its bit field.
  uint64_t* ptr = reinterpret_cast<uint64_t*>(blockbase) + 1 + (groupid_bit_offset >> 6);
  *ptr |= (static_cast<uint64_t>(group_id) << (groupid_bit_offset & 63));
}
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,75 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
/// \brief MapNode is an ExecNode type class which process a task like filter/project
|
||||
/// (See SubmitTask method) to each given ExecBatch object, which have one input, one
|
||||
/// output, and are pure functions on the input
|
||||
///
|
||||
/// A simple parallel runner is created with a "map_fn" which is just a function that
|
||||
/// takes a batch in and returns a batch. This simple parallel runner also needs an
|
||||
/// executor (use simple synchronous runner if there is no executor)
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/exec/exec_plan.h"
|
||||
#include "arrow/compute/exec/util.h"
|
||||
#include "arrow/compute/type_fwd.h"
|
||||
#include "arrow/status.h"
|
||||
#include "arrow/type_fwd.h"
|
||||
#include "arrow/util/cancel.h"
|
||||
#include "arrow/util/type_fwd.h"
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
// Base class for one-input, one-output nodes (e.g. filter/project) whose
// transformation is a pure function of each input batch. Subclasses provide
// the per-batch function via SubmitTask.
class ARROW_EXPORT MapNode : public ExecNode {
 public:
  MapNode(ExecPlan* plan, std::vector<ExecNode*> inputs,
          std::shared_ptr<Schema> output_schema);

  // ExecNode interface: propagate errors and input-completion downstream.
  void ErrorReceived(ExecNode* input, Status error) override;

  void InputFinished(ExecNode* input, int total_batches) override;

  Status StartProducing() override;

  void PauseProducing(ExecNode* output, int32_t counter) override;

  void ResumeProducing(ExecNode* output, int32_t counter) override;

  void StopProducing(ExecNode* output) override;

  void StopProducing() override;

 protected:
  // Schedule `map_fn` to be applied to `batch` (possibly on an executor);
  // the result batch is forwarded to this node's output.
  void SubmitTask(std::function<Result<ExecBatch>(ExecBatch)> map_fn, ExecBatch batch);

  // Mark this node finished with the given status.
  virtual void Finish(Status finish_st = Status::OK());

 protected:
  // Counter for the number of batches received
  AtomicCounter input_counter_;
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,562 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/api_aggregate.h"
|
||||
#include "arrow/compute/api_vector.h"
|
||||
#include "arrow/compute/exec.h"
|
||||
#include "arrow/compute/exec/expression.h"
|
||||
#include "arrow/record_batch.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/util/async_generator.h"
|
||||
#include "arrow/util/async_util.h"
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {
|
||||
|
||||
namespace internal {
|
||||
|
||||
class Executor;
|
||||
|
||||
} // namespace internal
|
||||
|
||||
namespace compute {
|
||||
|
||||
using AsyncExecBatchGenerator = AsyncGenerator<std::optional<ExecBatch>>;
|
||||
|
||||
/// \addtogroup execnode-options
|
||||
/// @{
|
||||
/// \brief Base class for options passed to ExecNode factories.
///
/// Each concrete node kind declares a subclass carrying its configuration.
class ARROW_EXPORT ExecNodeOptions {
 public:
  virtual ~ExecNodeOptions() = default;
};
|
||||
|
||||
/// \brief Adapt an AsyncGenerator<ExecBatch> as a source node
///
/// plan->exec_context()->executor() will be used to parallelize pushing to
/// outputs, if provided.
class ARROW_EXPORT SourceNodeOptions : public ExecNodeOptions {
 public:
  /// \brief Construct from the output schema and a batch generator.
  SourceNodeOptions(std::shared_ptr<Schema> output_schema,
                    std::function<Future<std::optional<ExecBatch>>()> generator)
      : output_schema(std::move(output_schema)), generator(std::move(generator)) {}

  /// \brief Create source options that emit the contents of a Table.
  static Result<std::shared_ptr<SourceNodeOptions>> FromTable(const Table& table,
                                                              arrow::internal::Executor*);

  /// \brief Create source options that emit batches from a RecordBatchReader.
  static Result<std::shared_ptr<SourceNodeOptions>> FromRecordBatchReader(
      std::shared_ptr<RecordBatchReader> reader, std::shared_ptr<Schema> schema,
      arrow::internal::Executor*);

  /// \brief Schema of the batches produced by the generator.
  std::shared_ptr<Schema> output_schema;
  /// \brief Asynchronous producer of the input batches.
  std::function<Future<std::optional<ExecBatch>>()> generator;
};
|
||||
|
||||
/// \brief An extended Source node which accepts a table
|
||||
class ARROW_EXPORT TableSourceNodeOptions : public ExecNodeOptions {
|
||||
public:
|
||||
static constexpr int64_t kDefaultMaxBatchSize = 1 << 20;
|
||||
TableSourceNodeOptions(std::shared_ptr<Table> table,
|
||||
int64_t max_batch_size = kDefaultMaxBatchSize)
|
||||
: table(table), max_batch_size(max_batch_size) {}
|
||||
|
||||
// arrow table which acts as the data source
|
||||
std::shared_ptr<Table> table;
|
||||
// Size of batches to emit from this node
|
||||
// If the table is larger the node will emit multiple batches from the
|
||||
// the table to be processed in parallel.
|
||||
int64_t max_batch_size;
|
||||
};
|
||||
|
||||
/// \brief Define a lazy resolved Arrow table.
|
||||
///
|
||||
/// The table uniquely identified by the names can typically be resolved at the time when
|
||||
/// the plan is to be consumed.
|
||||
///
|
||||
/// This node is for serialization purposes only and can never be executed.
|
||||
class ARROW_EXPORT NamedTableNodeOptions : public ExecNodeOptions {
|
||||
public:
|
||||
NamedTableNodeOptions(std::vector<std::string> names, std::shared_ptr<Schema> schema)
|
||||
: names(std::move(names)), schema(schema) {}
|
||||
|
||||
std::vector<std::string> names;
|
||||
std::shared_ptr<Schema> schema;
|
||||
};
|
||||
|
||||
/// \brief An extended Source node which accepts a schema
|
||||
///
|
||||
/// ItMaker is a maker of an iterator of tabular data.
|
||||
template <typename ItMaker>
|
||||
class ARROW_EXPORT SchemaSourceNodeOptions : public ExecNodeOptions {
|
||||
public:
|
||||
SchemaSourceNodeOptions(std::shared_ptr<Schema> schema, ItMaker it_maker,
|
||||
arrow::internal::Executor* io_executor = NULLPTR)
|
||||
: schema(schema), it_maker(std::move(it_maker)), io_executor(io_executor) {}
|
||||
|
||||
/// \brief The schema of the record batches from the iterator
|
||||
std::shared_ptr<Schema> schema;
|
||||
|
||||
/// \brief A maker of an iterator which acts as the data source
|
||||
ItMaker it_maker;
|
||||
|
||||
/// \brief The executor to use for scanning the iterator
|
||||
///
|
||||
/// Defaults to the default I/O executor.
|
||||
arrow::internal::Executor* io_executor;
|
||||
};
|
||||
|
||||
/// \brief Source-node options backed by a RecordBatchReader.
class ARROW_EXPORT RecordBatchReaderSourceNodeOptions : public ExecNodeOptions {
 public:
  /// \brief Construct from a reader and an optional I/O executor.
  RecordBatchReaderSourceNodeOptions(std::shared_ptr<RecordBatchReader> reader,
                                     arrow::internal::Executor* io_executor = NULLPTR)
      : reader(std::move(reader)), io_executor(io_executor) {}

  /// \brief The RecordBatchReader which acts as the data source
  std::shared_ptr<RecordBatchReader> reader;

  /// \brief The executor to use for the reader
  ///
  /// Defaults to the default I/O executor.
  arrow::internal::Executor* io_executor;
};
|
||||
|
||||
// Maker of an iterator over vectors of arrays (one ArrayVector per batch).
using ArrayVectorIteratorMaker = std::function<Iterator<std::shared_ptr<ArrayVector>>()>;
/// \brief An extended Source node which accepts a schema and array-vectors
class ARROW_EXPORT ArrayVectorSourceNodeOptions
    : public SchemaSourceNodeOptions<ArrayVectorIteratorMaker> {
  using SchemaSourceNodeOptions::SchemaSourceNodeOptions;
};
|
||||
|
||||
// Maker of an iterator over ExecBatch objects.
using ExecBatchIteratorMaker = std::function<Iterator<std::shared_ptr<ExecBatch>>()>;
/// \brief An extended Source node which accepts a schema and exec-batches
class ARROW_EXPORT ExecBatchSourceNodeOptions
    : public SchemaSourceNodeOptions<ExecBatchIteratorMaker> {
  using SchemaSourceNodeOptions::SchemaSourceNodeOptions;
};
|
||||
|
||||
// Maker of an iterator over RecordBatch objects.
using RecordBatchIteratorMaker = std::function<Iterator<std::shared_ptr<RecordBatch>>()>;
/// \brief An extended Source node which accepts a schema and record-batches
class ARROW_EXPORT RecordBatchSourceNodeOptions
    : public SchemaSourceNodeOptions<RecordBatchIteratorMaker> {
  using SchemaSourceNodeOptions::SchemaSourceNodeOptions;
};
|
||||
|
||||
/// \brief Make a node which excludes some rows from batches passed through it
///
/// filter_expression will be evaluated against each batch which is pushed to
/// this node. Any rows for which filter_expression does not evaluate to `true` will be
/// excluded in the batch emitted by this node.
class ARROW_EXPORT FilterNodeOptions : public ExecNodeOptions {
 public:
  /// \brief Construct from the boolean expression rows must satisfy.
  explicit FilterNodeOptions(Expression filter_expression)
      : filter_expression(std::move(filter_expression)) {}

  /// \brief Boolean expression evaluated per row to decide inclusion.
  Expression filter_expression;
};
|
||||
|
||||
/// \brief Make a node which executes expressions on input batches, producing new batches.
///
/// Each expression will be evaluated against each batch which is pushed to
/// this node to produce a corresponding output column.
///
/// If names are not provided, the string representations of exprs will be used.
class ARROW_EXPORT ProjectNodeOptions : public ExecNodeOptions {
 public:
  /// \brief Construct from the output-column expressions and optional names.
  explicit ProjectNodeOptions(std::vector<Expression> expressions,
                              std::vector<std::string> names = {})
      : expressions(std::move(expressions)), names(std::move(names)) {}

  /// \brief One expression per output column.
  std::vector<Expression> expressions;
  /// \brief Optional output column names (parallel to `expressions`).
  std::vector<std::string> names;
};
|
||||
|
||||
/// \brief Make a node which aggregates input batches, optionally grouped by keys.
///
/// If the keys attribute is a non-empty vector, then each aggregate in `aggregates` is
/// expected to be a HashAggregate function. If the keys attribute is an empty vector,
/// then each aggregate is assumed to be a ScalarAggregate function.
class ARROW_EXPORT AggregateNodeOptions : public ExecNodeOptions {
 public:
  /// \brief Construct from the aggregate functions and optional grouping keys.
  explicit AggregateNodeOptions(std::vector<Aggregate> aggregates,
                                std::vector<FieldRef> keys = {})
      : aggregates(std::move(aggregates)), keys(std::move(keys)) {}

  // aggregations which will be applied to the targeted fields
  std::vector<Aggregate> aggregates;
  // keys by which aggregations will be grouped
  std::vector<FieldRef> keys;
};
|
||||
|
||||
// Default backpressure thresholds, in bytes of queued data.
constexpr int32_t kDefaultBackpressureHighBytes = 1 << 30;  // 1GiB
constexpr int32_t kDefaultBackpressureLowBytes = 1 << 28;   // 256MiB
|
||||
|
||||
/// \brief Read-only view of a node's backpressure state.
class ARROW_EXPORT BackpressureMonitor {
 public:
  virtual ~BackpressureMonitor() = default;
  /// \brief Number of bytes currently held by the monitored node.
  virtual uint64_t bytes_in_use() = 0;
  /// \brief Whether the producer is currently paused due to backpressure.
  virtual bool is_paused() = 0;
};
|
||||
|
||||
/// \brief Options to control backpressure behavior
struct ARROW_EXPORT BackpressureOptions {
  /// \brief Create default options that perform no backpressure
  BackpressureOptions() : resume_if_below(0), pause_if_above(0) {}
  /// \brief Create options that will perform backpressure
  ///
  /// \param resume_if_below The producer should resume producing if the backpressure
  /// queue has fewer than resume_if_below items.
  /// \param pause_if_above The producer should pause producing if the backpressure
  /// queue has more than pause_if_above items
  BackpressureOptions(uint64_t resume_if_below, uint64_t pause_if_above)
      : resume_if_below(resume_if_below), pause_if_above(pause_if_above) {}

  /// \brief Thresholds in bytes, using the defaults declared above
  /// (pause at 1GiB queued, resume below 256MiB).
  static BackpressureOptions DefaultBackpressure() {
    return BackpressureOptions(kDefaultBackpressureLowBytes,
                               kDefaultBackpressureHighBytes);
  }

  /// \brief Backpressure is disabled when the pause threshold is zero
  /// (the default-constructed state).
  bool should_apply_backpressure() const { return pause_if_above > 0; }

  // Low-water mark: resume the producer below this level.
  uint64_t resume_if_below;
  // High-water mark: pause the producer above this level.
  uint64_t pause_if_above;
};
|
||||
|
||||
/// \brief Add a sink node which forwards to an AsyncGenerator<ExecBatch>
|
||||
///
|
||||
/// Emitted batches will not be ordered.
|
||||
class ARROW_EXPORT SinkNodeOptions : public ExecNodeOptions {
|
||||
public:
|
||||
explicit SinkNodeOptions(std::function<Future<std::optional<ExecBatch>>()>* generator,
|
||||
std::shared_ptr<Schema>* schema,
|
||||
BackpressureOptions backpressure = {},
|
||||
BackpressureMonitor** backpressure_monitor = NULLPTR)
|
||||
: generator(generator),
|
||||
schema(schema),
|
||||
backpressure(backpressure),
|
||||
backpressure_monitor(backpressure_monitor) {}
|
||||
|
||||
explicit SinkNodeOptions(std::function<Future<std::optional<ExecBatch>>()>* generator,
|
||||
BackpressureOptions backpressure = {},
|
||||
BackpressureMonitor** backpressure_monitor = NULLPTR)
|
||||
: generator(generator),
|
||||
schema(NULLPTR),
|
||||
backpressure(std::move(backpressure)),
|
||||
backpressure_monitor(backpressure_monitor) {}
|
||||
|
||||
/// \brief A pointer to a generator of batches.
|
||||
///
|
||||
/// This will be set when the node is added to the plan and should be used to consume
|
||||
/// data from the plan. If this function is not called frequently enough then the sink
|
||||
/// node will start to accumulate data and may apply backpressure.
|
||||
std::function<Future<std::optional<ExecBatch>>()>* generator;
|
||||
/// \brief A pointer which will be set to the schema of the generated batches
|
||||
///
|
||||
/// This is optional, if nullptr is passed in then it will be ignored.
|
||||
/// This will be set when the node is added to the plan, before StartProducing is called
|
||||
std::shared_ptr<Schema>* schema;
|
||||
/// \brief Options to control when to apply backpressure
|
||||
///
|
||||
/// This is optional, the default is to never apply backpressure. If the plan is not
|
||||
/// consumed quickly enough the system may eventually run out of memory.
|
||||
BackpressureOptions backpressure;
|
||||
/// \brief A pointer to a backpressure monitor
|
||||
///
|
||||
/// This will be set when the node is added to the plan. This can be used to inspect
|
||||
/// the amount of data currently queued in the sink node. This is an optional utility
|
||||
/// and backpressure can be applied even if this is not used.
|
||||
BackpressureMonitor** backpressure_monitor;
|
||||
};
|
||||
|
||||
/// \brief Control used by a SinkNodeConsumer to pause & resume
///
/// An instance is handed to SinkNodeConsumer::Init by the sink node.
///
/// Callers should ensure that they do not call Pause and Resume simultaneously and they
/// should sequence things so that a call to Pause() is always followed by an eventual
/// call to Resume()
class ARROW_EXPORT BackpressureControl {
 public:
  virtual ~BackpressureControl() = default;
  /// \brief Ask the input to pause
  ///
  /// This is best effort, batches may continue to arrive
  /// Must eventually be followed by a call to Resume() or deadlock will occur
  virtual void Pause() = 0;
  /// \brief Ask the input to resume
  virtual void Resume() = 0;
};
|
||||
|
||||
/// \brief An interface for receiving batches produced by a plan
///
/// Used via ConsumingSinkNodeOptions: Init is called once as the plan starts,
/// Consume is called for each batch, and Finish is called after the last batch.
class ARROW_EXPORT SinkNodeConsumer {
 public:
  virtual ~SinkNodeConsumer() = default;
  /// \brief Prepare any consumer state
  ///
  /// This will be run once the schema is finalized as the plan is starting and
  /// before any calls to Consume. A common use is to save off the schema so that
  /// batches can be interpreted.
  /// TODO(ARROW-17837) Move ExecPlan* plan to query context
  virtual Status Init(const std::shared_ptr<Schema>& schema,
                      BackpressureControl* backpressure_control, ExecPlan* plan) = 0;
  /// \brief Consume a batch of data
  virtual Status Consume(ExecBatch batch) = 0;
  /// \brief Signal to the consumer that the last batch has been delivered
  ///
  /// The returned future should only finish when all outstanding tasks have completed
  virtual Future<> Finish() = 0;
};
|
||||
|
||||
/// \brief Add a sink node which consumes data within the exec plan run
class ARROW_EXPORT ConsumingSinkNodeOptions : public ExecNodeOptions {
 public:
  /// \param consumer Receives the Init/Consume/Finish callbacks as the plan runs
  /// \param names Optional replacement names for the sink's schema fields
  explicit ConsumingSinkNodeOptions(std::shared_ptr<SinkNodeConsumer> consumer,
                                    std::vector<std::string> names = {})
      : consumer(std::move(consumer)), names(std::move(names)) {}

  /// \brief The consumer that batches will be handed to
  std::shared_ptr<SinkNodeConsumer> consumer;
  /// \brief Names to rename the sink's schema fields to
  ///
  /// If specified then names must be provided for all fields. Currently, only a flat
  /// schema is supported (see ARROW-15901).
  std::vector<std::string> names;
};
|
||||
|
||||
/// \brief Make a node which sorts rows passed through it
///
/// All batches pushed to this node will be accumulated, then sorted, by the given
/// fields. Then sorted batches will be forwarded to the generator in sorted order.
class ARROW_EXPORT OrderBySinkNodeOptions : public SinkNodeOptions {
 public:
  /// \param sort_options Keys and directions to sort by
  /// \param generator See SinkNodeOptions::generator
  explicit OrderBySinkNodeOptions(
      SortOptions sort_options,
      std::function<Future<std::optional<ExecBatch>>()>* generator)
      : SinkNodeOptions(generator), sort_options(std::move(sort_options)) {}

  /// \brief Options describing how the input is sorted
  SortOptions sort_options;
};
|
||||
|
||||
/// @}
|
||||
|
||||
/// \brief The type of a join, determining which rows appear in the output
enum class JoinType {
  LEFT_SEMI,    // left rows that have at least one match on the right
  RIGHT_SEMI,   // right rows that have at least one match on the left
  LEFT_ANTI,    // left rows with no match on the right
  RIGHT_ANTI,   // right rows with no match on the left
  INNER,        // matching row pairs only
  LEFT_OUTER,   // matching pairs plus unmatched left rows
  RIGHT_OUTER,  // matching pairs plus unmatched right rows
  FULL_OUTER    // matching pairs plus unmatched rows from both sides
};
|
||||
|
||||
/// \brief Get a human-readable representation of a join type
std::string ToString(JoinType t);

/// \brief How join keys are compared (determines whether a null key equals
/// another null key or not; see HashJoinNodeOptions::key_cmp).
/// NOTE(review): EQ presumably means null keys never match while IS treats two
/// nulls as equal (SQL IS NOT DISTINCT FROM) -- confirm in the join implementation.
enum class JoinKeyCmp { EQ, IS };

/// \addtogroup execnode-options
/// @{
|
||||
|
||||
/// \brief Make a node which implements join operation using hash join strategy.
|
||||
class ARROW_EXPORT HashJoinNodeOptions : public ExecNodeOptions {
|
||||
public:
|
||||
static constexpr const char* default_output_suffix_for_left = "";
|
||||
static constexpr const char* default_output_suffix_for_right = "";
|
||||
HashJoinNodeOptions(
|
||||
JoinType in_join_type, std::vector<FieldRef> in_left_keys,
|
||||
std::vector<FieldRef> in_right_keys, Expression filter = literal(true),
|
||||
std::string output_suffix_for_left = default_output_suffix_for_left,
|
||||
std::string output_suffix_for_right = default_output_suffix_for_right,
|
||||
bool disable_bloom_filter = false)
|
||||
: join_type(in_join_type),
|
||||
left_keys(std::move(in_left_keys)),
|
||||
right_keys(std::move(in_right_keys)),
|
||||
output_all(true),
|
||||
output_suffix_for_left(std::move(output_suffix_for_left)),
|
||||
output_suffix_for_right(std::move(output_suffix_for_right)),
|
||||
filter(std::move(filter)),
|
||||
disable_bloom_filter(disable_bloom_filter) {
|
||||
this->key_cmp.resize(this->left_keys.size());
|
||||
for (size_t i = 0; i < this->left_keys.size(); ++i) {
|
||||
this->key_cmp[i] = JoinKeyCmp::EQ;
|
||||
}
|
||||
}
|
||||
HashJoinNodeOptions(std::vector<FieldRef> in_left_keys,
|
||||
std::vector<FieldRef> in_right_keys)
|
||||
: left_keys(std::move(in_left_keys)), right_keys(std::move(in_right_keys)) {
|
||||
this->join_type = JoinType::INNER;
|
||||
this->output_all = true;
|
||||
this->output_suffix_for_left = default_output_suffix_for_left;
|
||||
this->output_suffix_for_right = default_output_suffix_for_right;
|
||||
this->key_cmp.resize(this->left_keys.size());
|
||||
for (size_t i = 0; i < this->left_keys.size(); ++i) {
|
||||
this->key_cmp[i] = JoinKeyCmp::EQ;
|
||||
}
|
||||
this->filter = literal(true);
|
||||
}
|
||||
HashJoinNodeOptions(
|
||||
JoinType join_type, std::vector<FieldRef> left_keys,
|
||||
std::vector<FieldRef> right_keys, std::vector<FieldRef> left_output,
|
||||
std::vector<FieldRef> right_output, Expression filter = literal(true),
|
||||
std::string output_suffix_for_left = default_output_suffix_for_left,
|
||||
std::string output_suffix_for_right = default_output_suffix_for_right,
|
||||
bool disable_bloom_filter = false)
|
||||
: join_type(join_type),
|
||||
left_keys(std::move(left_keys)),
|
||||
right_keys(std::move(right_keys)),
|
||||
output_all(false),
|
||||
left_output(std::move(left_output)),
|
||||
right_output(std::move(right_output)),
|
||||
output_suffix_for_left(std::move(output_suffix_for_left)),
|
||||
output_suffix_for_right(std::move(output_suffix_for_right)),
|
||||
filter(std::move(filter)),
|
||||
disable_bloom_filter(disable_bloom_filter) {
|
||||
this->key_cmp.resize(this->left_keys.size());
|
||||
for (size_t i = 0; i < this->left_keys.size(); ++i) {
|
||||
this->key_cmp[i] = JoinKeyCmp::EQ;
|
||||
}
|
||||
}
|
||||
HashJoinNodeOptions(
|
||||
JoinType join_type, std::vector<FieldRef> left_keys,
|
||||
std::vector<FieldRef> right_keys, std::vector<FieldRef> left_output,
|
||||
std::vector<FieldRef> right_output, std::vector<JoinKeyCmp> key_cmp,
|
||||
Expression filter = literal(true),
|
||||
std::string output_suffix_for_left = default_output_suffix_for_left,
|
||||
std::string output_suffix_for_right = default_output_suffix_for_right,
|
||||
bool disable_bloom_filter = false)
|
||||
: join_type(join_type),
|
||||
left_keys(std::move(left_keys)),
|
||||
right_keys(std::move(right_keys)),
|
||||
output_all(false),
|
||||
left_output(std::move(left_output)),
|
||||
right_output(std::move(right_output)),
|
||||
key_cmp(std::move(key_cmp)),
|
||||
output_suffix_for_left(std::move(output_suffix_for_left)),
|
||||
output_suffix_for_right(std::move(output_suffix_for_right)),
|
||||
filter(std::move(filter)),
|
||||
disable_bloom_filter(disable_bloom_filter) {}
|
||||
|
||||
HashJoinNodeOptions() = default;
|
||||
|
||||
// type of join (inner, left, semi...)
|
||||
JoinType join_type = JoinType::INNER;
|
||||
// key fields from left input
|
||||
std::vector<FieldRef> left_keys;
|
||||
// key fields from right input
|
||||
std::vector<FieldRef> right_keys;
|
||||
// if set all valid fields from both left and right input will be output
|
||||
// (and field ref vectors for output fields will be ignored)
|
||||
bool output_all = false;
|
||||
// output fields passed from left input
|
||||
std::vector<FieldRef> left_output;
|
||||
// output fields passed from right input
|
||||
std::vector<FieldRef> right_output;
|
||||
// key comparison function (determines whether a null key is equal another null
|
||||
// key or not)
|
||||
std::vector<JoinKeyCmp> key_cmp;
|
||||
// suffix added to names of output fields coming from left input (used to distinguish,
|
||||
// if necessary, between fields of the same name in left and right input and can be left
|
||||
// empty if there are no name collisions)
|
||||
std::string output_suffix_for_left;
|
||||
// suffix added to names of output fields coming from right input
|
||||
std::string output_suffix_for_right;
|
||||
// residual filter which is applied to matching rows. Rows that do not match
|
||||
// the filter are not included. The filter is applied against the
|
||||
// concatenated input schema (left fields then right fields) and can reference
|
||||
// fields that are not included in the output.
|
||||
Expression filter = literal(true);
|
||||
// whether or not to disable Bloom filters in this join
|
||||
bool disable_bloom_filter = false;
|
||||
};
|
||||
|
||||
/// \brief Make a node which implements asof join operation
///
/// Note, this API is experimental and will change in the future
///
/// This node takes one left table and any number of right tables, and asof joins them
/// together. Batches produced by each input must be ordered by the "on" key.
/// This node will output one row for each row in the left table.
class ARROW_EXPORT AsofJoinNodeOptions : public ExecNodeOptions {
 public:
  /// \brief Keys for one input table of the AsofJoin operation
  ///
  /// The keys must be consistent across the input tables:
  /// Each "on" key must refer to a field of the same type and units across the tables.
  /// Each "by" key must refer to a list of fields of the same types across the tables.
  struct Keys {
    /// \brief "on" key for the join.
    ///
    /// The input table must be sorted by the "on" key. Must be a single field of a common
    /// type. Inexact match is used on the "on" key. i.e., a row is considered a match iff
    /// left_on - tolerance <= right_on <= left_on.
    /// Currently, the "on" key must be of an integer, date, or timestamp type.
    FieldRef on_key;
    /// \brief "by" key for the join.
    ///
    /// Each input table must have each field of the "by" key. Exact equality is used for
    /// each field of the "by" key.
    /// Currently, each field of the "by" key must be of an integer, date, timestamp, or
    /// base-binary type.
    std::vector<FieldRef> by_key;
  };

  /// \param input_keys One Keys entry per input table
  /// \param tolerance Tolerance for inexact "on" key matching, in the same units
  ///   as the "on" key; must be non-negative
  AsofJoinNodeOptions(std::vector<Keys> input_keys, int64_t tolerance)
      : input_keys(std::move(input_keys)), tolerance(tolerance) {}

  /// \brief AsofJoin keys per input table.
  ///
  /// \see `Keys` for details.
  std::vector<Keys> input_keys;
  /// \brief Tolerance for inexact "on" key matching. Must be non-negative.
  ///
  /// The tolerance is interpreted in the same units as the "on" key.
  int64_t tolerance;
};
|
||||
|
||||
/// \brief Make a node which select top_k/bottom_k rows passed through it
///
/// All batches pushed to this node will be accumulated, then selected, by the given
/// fields. Then sorted batches will be forwarded to the generator in sorted order.
class ARROW_EXPORT SelectKSinkNodeOptions : public SinkNodeOptions {
 public:
  /// \param select_k_options The k to select and the sort keys to select by
  /// \param generator See SinkNodeOptions::generator
  explicit SelectKSinkNodeOptions(
      SelectKOptions select_k_options,
      std::function<Future<std::optional<ExecBatch>>()>* generator)
      : SinkNodeOptions(generator), select_k_options(std::move(select_k_options)) {}

  /// SelectK options
  SelectKOptions select_k_options;
};
|
||||
|
||||
/// \brief Adapt a Table as a sink node
///
/// obtains the output of an execution plan to
/// a table pointer.
class ARROW_EXPORT TableSinkNodeOptions : public ExecNodeOptions {
 public:
  /// \param output_table Destination for the plan's output; the pointer must stay
  ///   valid for the lifetime of the plan (presumably populated when the plan
  ///   finishes -- confirm against the sink node implementation)
  explicit TableSinkNodeOptions(std::shared_ptr<Table>* output_table)
      : output_table(output_table) {}

  /// \brief Where the plan's output table will be written
  std::shared_ptr<Table>* output_table;
};
|
||||
|
||||
/// @}
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,53 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/exec/options.h"
|
||||
#include "arrow/record_batch.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/status.h"
|
||||
#include "arrow/type.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
/// \brief Accumulates record batches and produces an ordered result Datum
///
/// NOTE(review): the two factories suggest this backs the order-by and
/// select-k sink nodes -- confirm against the implementation file.
class OrderByImpl {
 public:
  virtual ~OrderByImpl() = default;

  /// \brief Push one input batch to be accumulated
  virtual void InputReceived(const std::shared_ptr<RecordBatch>& batch) = 0;

  /// \brief Finish accumulation and return the ordered result
  virtual Result<Datum> DoFinish() = 0;

  /// \brief Human-readable description of this instance
  virtual std::string ToString() const = 0;

  /// \brief Create an implementation that sorts its input by `options`
  static Result<std::unique_ptr<OrderByImpl>> MakeSort(
      ExecContext* ctx, const std::shared_ptr<Schema>& output_schema,
      const SortOptions& options);

  /// \brief Create an implementation that selects the top/bottom K rows
  static Result<std::unique_ptr<OrderByImpl>> MakeSelectK(
      ExecContext* ctx, const std::shared_ptr<Schema>& output_schema,
      const SelectKOptions& options);
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,184 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <random>
|
||||
#include "arrow/buffer.h"
|
||||
#include "arrow/compute/exec/util.h"
|
||||
#include "arrow/util/pcg_random.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
class PartitionSort {
 public:
  /// \brief Bucket sort rows on partition ids in O(num_rows) time.
  ///
  /// Include in the output exclusive cumulative sum of bucket sizes.
  /// This corresponds to ranges in the sorted array containing all row ids for
  /// each of the partitions.
  ///
  /// prtn_ranges must be initialized and have at least num_prtns + 1 elements
  /// when this method is called. When it returns, prtn_ranges[i + 1] will contain
  /// the total number of elements in partitions 0 through i. prtn_ranges[0] will
  /// be 0.
  ///
  /// prtn_id_impl must be a function that takes in a row id (int) and returns
  /// a partition id (int). The returned partition id must be between 0 and
  /// num_prtns (exclusive).
  ///
  /// output_pos_impl is a function that takes in a row id (int) and a position (int)
  /// in the bucket sorted output. The function should insert the row in the
  /// output.
  ///
  /// For example:
  ///
  /// in_arr: [5, 7, 2, 3, 5, 4]
  /// num_prtns: 3
  /// prtn_id_impl: [&in_arr] (int row_id) { return in_arr[row_id] / 3; }
  /// output_pos_impl: [&out_arr] (int row_id, int pos) { out_arr[pos] = row_id; }
  ///
  /// After Execution
  /// out_arr: [2, 5, 3, 5, 4, 7]
  /// prtn_ranges: [0, 1, 5, 6]
  template <class INPUT_PRTN_ID_FN, class OUTPUT_POS_FN>
  static void Eval(int64_t num_rows, int num_prtns, uint16_t* prtn_ranges,
                   INPUT_PRTN_ID_FN prtn_id_impl, OUTPUT_POS_FN output_pos_impl) {
    // Counts must fit in uint16_t, hence the 1 << 15 caps
    ARROW_DCHECK(num_rows > 0 && num_rows <= (1 << 15));
    ARROW_DCHECK(num_prtns >= 1 && num_prtns <= (1 << 15));

    memset(prtn_ranges, 0, (num_prtns + 1) * sizeof(uint16_t));

    // Pass 1: histogram of partition sizes, stored shifted one slot to the right
    for (int64_t i = 0; i < num_rows; ++i) {
      int prtn_id = static_cast<int>(prtn_id_impl(i));
      ++prtn_ranges[prtn_id + 1];
    }

    // In-place conversion of counts to exclusive prefix sums (partition start
    // offsets); order of the two statements below is significant
    uint16_t sum = 0;
    for (int i = 0; i < num_prtns; ++i) {
      uint16_t sum_next = sum + prtn_ranges[i + 1];
      prtn_ranges[i + 1] = sum;
      sum = sum_next;
    }

    // Pass 2: scatter rows to their slots, advancing each partition's cursor.
    // The increments restore prtn_ranges[i + 1] to the inclusive cumulative count.
    for (int64_t i = 0; i < num_rows; ++i) {
      int prtn_id = static_cast<int>(prtn_id_impl(i));
      int pos = prtn_ranges[prtn_id + 1]++;
      output_pos_impl(i, pos);
    }
  }
};
|
||||
|
||||
/// \brief A control for synchronizing threads on a partitionable workload
class PartitionLocks {
 public:
  PartitionLocks();
  ~PartitionLocks();
  /// \brief Initializes the control, must be called before use
  ///
  /// \param num_threads Maximum number of threads that will access the partitions
  /// \param num_prtns Number of partitions to synchronize
  void Init(size_t num_threads, int num_prtns);
  /// \brief Cleans up the control, it should not be used after this call
  void CleanUp();
  /// \brief Acquire a partition to work on
  ///
  /// \param thread_id The index of the thread trying to acquire the partition lock
  /// \param num_prtns Length of prtns_to_try, must be <= num_prtns used in Init
  /// \param prtns_to_try An array of partitions that still have remaining work
  /// \param limit_retries If false, this method will spinwait forever until success
  /// \param max_retries Max times to attempt checking out work before returning false
  /// \param[out] locked_prtn_id The id of the partition locked
  /// \param[out] locked_prtn_id_pos The index of the partition locked in prtns_to_try
  /// \return True if a partition was locked, false if max_retries was attempted
  ///         without successfully acquiring a lock
  ///
  /// This method is thread safe
  bool AcquirePartitionLock(size_t thread_id, int num_prtns, const int* prtns_to_try,
                            bool limit_retries, int max_retries, int* locked_prtn_id,
                            int* locked_prtn_id_pos);
  /// \brief Release a partition so that other threads can work on it
  void ReleasePartitionLock(int prtn_id);

  // Executes (synchronously and using current thread) the same operation on a set of
  // multiple partitions. Tries to minimize partition locking overhead by randomizing and
  // adjusting order in which partitions are processed.
  //
  // PROCESS_PRTN_FN is a callback which will be executed for each partition after
  // acquiring the lock for that partition. It gets partition id as an argument.
  // IS_PRTN_EMPTY_FN is a callback which filters out (when returning true) partitions
  // with specific ids from processing.
  //
  template <typename IS_PRTN_EMPTY_FN, typename PROCESS_PRTN_FN>
  Status ForEachPartition(size_t thread_id,
                          /*scratch space buffer with space for one element per partition;
                            dirty in and dirty out*/
                          int* temp_unprocessed_prtns, IS_PRTN_EMPTY_FN is_prtn_empty_fn,
                          PROCESS_PRTN_FN process_prtn_fn) {
    // Collect the ids of all non-empty partitions into the scratch buffer
    int num_unprocessed_partitions = 0;
    for (int i = 0; i < num_prtns_; ++i) {
      bool is_prtn_empty = is_prtn_empty_fn(i);
      if (!is_prtn_empty) {
        temp_unprocessed_prtns[num_unprocessed_partitions++] = i;
      }
    }
    while (num_unprocessed_partitions > 0) {
      int locked_prtn_id;
      int locked_prtn_id_pos;
      // Retries are unlimited, so this always succeeds (may spinwait)
      AcquirePartitionLock(thread_id, num_unprocessed_partitions, temp_unprocessed_prtns,
                           /*limit_retries=*/false, /*max_retries=*/-1, &locked_prtn_id,
                           &locked_prtn_id_pos);
      {
        // RAII guard: the lock is released even if process_prtn_fn fails
        class AutoReleaseLock {
         public:
          AutoReleaseLock(PartitionLocks* locks, int prtn_id)
              : locks(locks), prtn_id(prtn_id) {}
          ~AutoReleaseLock() { locks->ReleasePartitionLock(prtn_id); }
          PartitionLocks* locks;
          int prtn_id;
        } auto_release_lock(this, locked_prtn_id);
        ARROW_RETURN_NOT_OK(process_prtn_fn(locked_prtn_id));
      }
      // Remove the processed partition by swapping in the last unprocessed one
      if (locked_prtn_id_pos < num_unprocessed_partitions - 1) {
        temp_unprocessed_prtns[locked_prtn_id_pos] =
            temp_unprocessed_prtns[num_unprocessed_partitions - 1];
      }
      --num_unprocessed_partitions;
    }
    return Status::OK();
  }

 private:
  std::atomic<bool>* lock_ptr(int prtn_id);
  int random_int(size_t thread_id, int num_values);

  // Each lock is padded so it occupies its own cache line (avoids false sharing)
  struct PartitionLock {
    static constexpr int kCacheLineBytes = 64;
    std::atomic<bool> lock;
    uint8_t padding[kCacheLineBytes];
  };
  int num_prtns_;
  std::unique_ptr<PartitionLock[]> locks_;
  // RNGs used by random_int (presumably one per thread, sized in Init -- confirm)
  std::unique_ptr<arrow::random::pcg32_fast[]> rngs_;
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,161 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#include "arrow/compute/exec.h"
|
||||
#include "arrow/compute/exec/task_util.h"
|
||||
#include "arrow/compute/exec/util.h"
|
||||
#include "arrow/io/interfaces.h"
|
||||
#include "arrow/util/async_util.h"
|
||||
|
||||
#pragma once
|
||||
|
||||
namespace arrow {
|
||||
|
||||
using io::IOContext;
|
||||
namespace compute {
|
||||
struct ARROW_EXPORT QueryOptions {
  // Defined out-of-line. NOTE(review): use_legacy_batching has no member
  // initializer here, so this constructor presumably sets its default -- confirm
  // in the implementation file.
  QueryOptions();

  /// \brief Should the plan use a legacy batching strategy
  ///
  /// This is currently in place only to support the Scanner::ToTable
  /// method. This method relies on batch indices from the scanner
  /// remaining consistent. This is impractical in the ExecPlan which
  /// might slice batches as needed (e.g. for a join)
  ///
  /// However, it still works for simple plans and this is the only way
  /// we have at the moment for maintaining implicit order.
  bool use_legacy_batching;
};
|
||||
|
||||
/// \brief Per-query state shared by all nodes of a plan (options, memory pool,
/// executors, task scheduling, thread-local scratch space)
class ARROW_EXPORT QueryContext {
 public:
  QueryContext(QueryOptions opts = {},
               ExecContext exec_context = *default_exec_context());

  /// \brief Prepare the context for execution; call before any scheduling methods
  Status Init(size_t max_num_threads, util::AsyncTaskScheduler* scheduler);

  const ::arrow::internal::CpuInfo* cpu_info() const;
  int64_t hardware_flags() const;
  /// \brief The options this context was created with
  const QueryOptions& options() const { return options_; }
  /// \brief Memory pool taken from the wrapped ExecContext
  MemoryPool* memory_pool() const { return exec_context_.memory_pool(); }
  /// \brief CPU executor taken from the wrapped ExecContext
  ::arrow::internal::Executor* executor() const { return exec_context_.executor(); }
  ExecContext* exec_context() { return &exec_context_; }
  IOContext* io_context() { return &io_context_; }
  TaskScheduler* scheduler() { return task_scheduler_.get(); }
  util::AsyncTaskScheduler* async_scheduler() { return async_scheduler_; }

  /// \brief Index identifying the calling thread
  size_t GetThreadIndex();
  size_t max_concurrency() const;
  /// \brief Scratch vector stack for the given thread index
  Result<util::TempVectorStack*> GetTempStack(size_t thread_index);

  /// \brief Start an external task
  ///
  /// This should be avoided if possible. It is kept in for now for legacy
  /// purposes. This should be called before the external task is started. If
  /// a valid future is returned then it should be marked complete when the
  /// external task has finished.
  ///
  /// \return an invalid future if the plan has already ended, otherwise this
  ///         returns a future that must be completed when the external task
  ///         finishes.
  Result<Future<>> BeginExternalTask();

  /// \brief Add a single function as a task to the query's task group
  /// on the compute threadpool.
  ///
  /// \param fn The task to run. Takes no arguments and returns a Status.
  Status ScheduleTask(std::function<Status()> fn);
  /// \brief Add a single function as a task to the query's task group
  /// on the compute threadpool.
  ///
  /// \param fn The task to run. Takes the thread index and returns a Status.
  Status ScheduleTask(std::function<Status(size_t)> fn);
  /// \brief Add a single function as a task to the query's task group on
  /// the IO thread pool
  ///
  /// \param fn The task to run. Returns a status.
  Status ScheduleIOTask(std::function<Status()> fn);

  // Register/Start TaskGroup is a way of performing a "Parallel For" pattern:
  // - The task function takes the thread index and the index of the task
  // - The on_finished function takes the thread index
  // Returns an integer ID that will be used to reference the task group in
  // StartTaskGroup. At runtime, call StartTaskGroup with the ID and the number of times
  // you'd like the task to be executed. The need to register a task group before use will
  // be removed after we rewrite the scheduler.
  /// \brief Register a "parallel for" task group with the scheduler
  ///
  /// \param task The function implementing the task. Takes the thread_index and
  ///             the task index.
  /// \param on_finished The function that gets run once all tasks have been completed.
  ///                    Takes the thread_index.
  ///
  /// Must be called inside of ExecNode::Init.
  int RegisterTaskGroup(std::function<Status(size_t, int64_t)> task,
                        std::function<Status(size_t)> on_finished);

  /// \brief Start the task group with the specified ID. This can only
  /// be called once per task_group_id.
  ///
  /// \param task_group_id The ID of the task group to run
  /// \param num_tasks The number of times to run the task
  Status StartTaskGroup(int task_group_id, int64_t num_tasks);

  // This is an RAII class for keeping track of in-flight file IO. Useful for getting
  // an estimate of memory use, and how much memory we expect to be freed soon.
  // Returned by ReportTempFileIO.
  struct [[nodiscard]] TempFileIOMark {
    QueryContext* ctx_;
    size_t bytes_;

    TempFileIOMark(QueryContext* ctx, size_t bytes) : ctx_(ctx), bytes_(bytes) {
      // NOTE(review): acquire on the increment and release on the decrement is the
      // reverse of the usual publish pattern; for a statistics counter relaxed would
      // likely suffice -- confirm the intended ordering.
      ctx_->in_flight_bytes_to_disk_.fetch_add(bytes_, std::memory_order_acquire);
    }

    ARROW_DISALLOW_COPY_AND_ASSIGN(TempFileIOMark);

    ~TempFileIOMark() {
      ctx_->in_flight_bytes_to_disk_.fetch_sub(bytes_, std::memory_order_release);
    }
  };

  /// \brief Track `bytes` of in-flight temp-file IO for the lifetime of the mark
  TempFileIOMark ReportTempFileIO(size_t bytes) { return {this, bytes}; }

  /// \brief Current total of in-flight temp-file IO bytes
  size_t GetCurrentTempFileIO() { return in_flight_bytes_to_disk_.load(); }

 private:
  QueryOptions options_;
  // To be replaced with Acero-specific context once scheduler is done and
  // we don't need ExecContext for kernels
  ExecContext exec_context_;
  IOContext io_context_;

  util::AsyncTaskScheduler* async_scheduler_ = NULLPTR;
  std::unique_ptr<TaskScheduler> task_scheduler_ = TaskScheduler::Make();

  ThreadIndexer thread_indexer_;
  // Per-thread scratch state, lazily initialized (see is_init)
  struct ThreadLocalData {
    bool is_init = false;
    util::TempVectorStack stack;
  };
  std::vector<ThreadLocalData> tld_;

  std::atomic<size_t> in_flight_bytes_to_disk_{0};
};
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,226 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/light_array.h" // for KeyColumnMetadata
|
||||
#include "arrow/type.h" // for DataType, FieldRef, Field and Schema
|
||||
|
||||
namespace arrow {
|
||||
|
||||
using internal::checked_cast;
|
||||
|
||||
namespace compute {
|
||||
|
||||
// Identifiers for all different row schemas that are used in a join
//
// These values serve as the schema handles of SchemaProjectionMaps
// (see the HashJoinProjectionMaps alias below).
enum class HashJoinProjection : int {
  INPUT = 0,
  KEY = 1,
  PAYLOAD = 2,
  FILTER = 3,
  OUTPUT = 4
};
|
||||
|
||||
// Non-owning view describing how columns of one projection map to columns of
// another projection, with both mappings expressed through a common "base"
// schema (see SchemaProjectionMaps::map(), which produces these views).
struct SchemaProjectionMap {
  // Sentinel for a base column that has no counterpart in the target projection.
  static constexpr int kMissingField = -1;
  // Number of columns in the source projection.
  int num_cols;
  // For each source column: its index in the base schema (array of num_cols).
  const int* source_to_base;
  // For each base-schema column: its index in the target projection, or
  // kMissingField if the column is not part of the target.
  const int* base_to_target;
  // Map source column i to the corresponding target column.  The source
  // column must exist in the base schema (DCHECK'd below).
  inline int get(int i) const {
    ARROW_DCHECK(i >= 0 && i < num_cols);
    ARROW_DCHECK(source_to_base[i] != kMissingField);
    return base_to_target[source_to_base[i]];
  }
};
|
||||
|
||||
/// Helper class for managing different projections of the same row schema.
|
||||
/// Used to efficiently map any field in one projection to a corresponding field in
|
||||
/// another projection.
|
||||
/// Materialized mappings are generated lazily at the time of the first access.
|
||||
/// Thread-safe apart from initialization.
|
||||
template <typename ProjectionIdEnum>
|
||||
class SchemaProjectionMaps {
|
||||
public:
|
||||
static constexpr int kMissingField = -1;
|
||||
|
||||
Status Init(ProjectionIdEnum full_schema_handle, const Schema& schema,
|
||||
const std::vector<ProjectionIdEnum>& projection_handles,
|
||||
const std::vector<const std::vector<FieldRef>*>& projections) {
|
||||
ARROW_DCHECK(projection_handles.size() == projections.size());
|
||||
ARROW_RETURN_NOT_OK(RegisterSchema(full_schema_handle, schema));
|
||||
for (size_t i = 0; i < projections.size(); ++i) {
|
||||
ARROW_RETURN_NOT_OK(
|
||||
RegisterProjectedSchema(projection_handles[i], *(projections[i]), schema));
|
||||
}
|
||||
RegisterEnd();
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
int num_cols(ProjectionIdEnum schema_handle) const {
|
||||
int id = schema_id(schema_handle);
|
||||
return static_cast<int>(schemas_[id].second.data_types.size());
|
||||
}
|
||||
|
||||
bool is_empty(ProjectionIdEnum schema_handle) const {
|
||||
return num_cols(schema_handle) == 0;
|
||||
}
|
||||
|
||||
const std::string& field_name(ProjectionIdEnum schema_handle, int field_id) const {
|
||||
int id = schema_id(schema_handle);
|
||||
return schemas_[id].second.field_names[field_id];
|
||||
}
|
||||
|
||||
const std::shared_ptr<DataType>& data_type(ProjectionIdEnum schema_handle,
|
||||
int field_id) const {
|
||||
int id = schema_id(schema_handle);
|
||||
return schemas_[id].second.data_types[field_id];
|
||||
}
|
||||
|
||||
const std::vector<std::shared_ptr<DataType>>& data_types(
|
||||
ProjectionIdEnum schema_handle) const {
|
||||
int id = schema_id(schema_handle);
|
||||
return schemas_[id].second.data_types;
|
||||
}
|
||||
|
||||
SchemaProjectionMap map(ProjectionIdEnum from, ProjectionIdEnum to) const {
|
||||
int id_from = schema_id(from);
|
||||
int id_to = schema_id(to);
|
||||
SchemaProjectionMap result;
|
||||
result.num_cols = num_cols(from);
|
||||
result.source_to_base = mappings_[id_from].data();
|
||||
result.base_to_target = inverse_mappings_[id_to].data();
|
||||
return result;
|
||||
}
|
||||
|
||||
protected:
|
||||
struct FieldInfos {
|
||||
std::vector<int> field_paths;
|
||||
std::vector<std::string> field_names;
|
||||
std::vector<std::shared_ptr<DataType>> data_types;
|
||||
};
|
||||
|
||||
Status RegisterSchema(ProjectionIdEnum handle, const Schema& schema) {
|
||||
FieldInfos out_fields;
|
||||
const FieldVector& in_fields = schema.fields();
|
||||
out_fields.field_paths.resize(in_fields.size());
|
||||
out_fields.field_names.resize(in_fields.size());
|
||||
out_fields.data_types.resize(in_fields.size());
|
||||
for (size_t i = 0; i < in_fields.size(); ++i) {
|
||||
const std::string& name = in_fields[i]->name();
|
||||
const std::shared_ptr<DataType>& type = in_fields[i]->type();
|
||||
out_fields.field_paths[i] = static_cast<int>(i);
|
||||
out_fields.field_names[i] = name;
|
||||
out_fields.data_types[i] = type;
|
||||
}
|
||||
schemas_.push_back(std::make_pair(handle, out_fields));
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status RegisterProjectedSchema(ProjectionIdEnum handle,
|
||||
const std::vector<FieldRef>& selected_fields,
|
||||
const Schema& full_schema) {
|
||||
FieldInfos out_fields;
|
||||
const FieldVector& in_fields = full_schema.fields();
|
||||
out_fields.field_paths.resize(selected_fields.size());
|
||||
out_fields.field_names.resize(selected_fields.size());
|
||||
out_fields.data_types.resize(selected_fields.size());
|
||||
for (size_t i = 0; i < selected_fields.size(); ++i) {
|
||||
// All fields must be found in schema without ambiguity
|
||||
ARROW_ASSIGN_OR_RAISE(auto match, selected_fields[i].FindOne(full_schema));
|
||||
const std::string& name = in_fields[match[0]]->name();
|
||||
const std::shared_ptr<DataType>& type = in_fields[match[0]]->type();
|
||||
out_fields.field_paths[i] = match[0];
|
||||
out_fields.field_names[i] = name;
|
||||
out_fields.data_types[i] = type;
|
||||
}
|
||||
schemas_.push_back(std::make_pair(handle, out_fields));
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
void RegisterEnd() {
|
||||
size_t size = schemas_.size();
|
||||
mappings_.resize(size);
|
||||
inverse_mappings_.resize(size);
|
||||
int id_base = 0;
|
||||
for (size_t i = 0; i < size; ++i) {
|
||||
GenerateMapForProjection(static_cast<int>(i), id_base);
|
||||
}
|
||||
}
|
||||
|
||||
int schema_id(ProjectionIdEnum schema_handle) const {
|
||||
for (size_t i = 0; i < schemas_.size(); ++i) {
|
||||
if (schemas_[i].first == schema_handle) {
|
||||
return static_cast<int>(i);
|
||||
}
|
||||
}
|
||||
// We should never get here
|
||||
ARROW_DCHECK(false);
|
||||
return -1;
|
||||
}
|
||||
|
||||
void GenerateMapForProjection(int id_proj, int id_base) {
|
||||
int num_cols_proj = static_cast<int>(schemas_[id_proj].second.data_types.size());
|
||||
int num_cols_base = static_cast<int>(schemas_[id_base].second.data_types.size());
|
||||
|
||||
std::vector<int>& mapping = mappings_[id_proj];
|
||||
std::vector<int>& inverse_mapping = inverse_mappings_[id_proj];
|
||||
mapping.resize(num_cols_proj);
|
||||
inverse_mapping.resize(num_cols_base);
|
||||
|
||||
if (id_proj == id_base) {
|
||||
for (int i = 0; i < num_cols_base; ++i) {
|
||||
mapping[i] = inverse_mapping[i] = i;
|
||||
}
|
||||
} else {
|
||||
const FieldInfos& fields_proj = schemas_[id_proj].second;
|
||||
const FieldInfos& fields_base = schemas_[id_base].second;
|
||||
for (int i = 0; i < num_cols_base; ++i) {
|
||||
inverse_mapping[i] = SchemaProjectionMap::kMissingField;
|
||||
}
|
||||
for (int i = 0; i < num_cols_proj; ++i) {
|
||||
int field_id = SchemaProjectionMap::kMissingField;
|
||||
for (int j = 0; j < num_cols_base; ++j) {
|
||||
if (fields_proj.field_paths[i] == fields_base.field_paths[j]) {
|
||||
field_id = j;
|
||||
// If there are multiple matches for the same input field,
|
||||
// it will be mapped to the first match.
|
||||
break;
|
||||
}
|
||||
}
|
||||
ARROW_DCHECK(field_id != SchemaProjectionMap::kMissingField);
|
||||
mapping[i] = field_id;
|
||||
inverse_mapping[field_id] = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// vector used as a mapping from ProjectionIdEnum to fields
|
||||
std::vector<std::pair<ProjectionIdEnum, FieldInfos>> schemas_;
|
||||
std::vector<std::vector<int>> mappings_;
|
||||
std::vector<std::vector<int>> inverse_mappings_;
|
||||
};
|
||||
|
||||
using HashJoinProjectionMaps = SchemaProjectionMaps<HashJoinProjection>;
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,761 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include "arrow/compute/exec/key_map.h"
|
||||
#include "arrow/compute/exec/options.h"
|
||||
#include "arrow/compute/exec/partition_util.h"
|
||||
#include "arrow/compute/exec/schema_util.h"
|
||||
#include "arrow/compute/exec/task_util.h"
|
||||
#include "arrow/compute/kernels/row_encoder.h"
|
||||
#include "arrow/compute/light_array.h"
|
||||
#include "arrow/compute/row/encode_internal.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
// Read-only helper for visiting the values and the validity (null) bytes of a
// single column stored inside a row-oriented RowTableImpl.
class RowArrayAccessor {
 public:
  // Find the index of this varbinary column within the sequence of all
  // varbinary columns encoded in rows.
  //
  static int VarbinaryColumnId(const RowTableMetadata& row_metadata, int column_id);

  // Calculate how many rows to skip from the tail of the
  // sequence of selected rows, such that the total size of skipped rows is at
  // least equal to the size specified by the caller. Skipping of the tail rows
  // is used to allow for faster processing by the caller of remaining rows
  // without checking buffer bounds (useful with SIMD or fixed size memory loads
  // and stores).
  //
  static int NumRowsToSkip(const RowTableImpl& rows, int column_id, int num_rows,
                           const uint32_t* row_ids, int num_tail_bytes_to_skip);

  // The supplied lambda will be called for each row in the given list of rows.
  // The arguments given to it will be:
  // - index of a row (within the set of selected rows),
  // - pointer to the value,
  // - byte length of the value.
  //
  // The information about nulls (validity bitmap) is not used in this call and
  // has to be processed separately.
  //
  template <class PROCESS_VALUE_FN>
  static void Visit(const RowTableImpl& rows, int column_id, int num_rows,
                    const uint32_t* row_ids, PROCESS_VALUE_FN process_value_fn);

  // The supplied lambda will be called for each row in the given list of rows.
  // The arguments given to it will be:
  // - index of a row (within the set of selected rows),
  // - byte 0xFF if the null is set for the row or 0x00 otherwise.
  //
  template <class PROCESS_VALUE_FN>
  static void VisitNulls(const RowTableImpl& rows, int column_id, int num_rows,
                         const uint32_t* row_ids, PROCESS_VALUE_FN process_value_fn);

 private:
#if defined(ARROW_HAVE_AVX2)
  // This is equivalent to Visit method, but processing 8 rows at a time in a
  // loop.
  // Returns the number of processed rows, which may be less than requested (up
  // to 7 rows at the end may be skipped).
  //
  template <class PROCESS_8_VALUES_FN>
  static int Visit_avx2(const RowTableImpl& rows, int column_id, int num_rows,
                        const uint32_t* row_ids, PROCESS_8_VALUES_FN process_8_values_fn);

  // This is equivalent to VisitNulls method, but processing 8 rows at a time in
  // a loop. Returns the number of processed rows, which may be less than
  // requested (up to 7 rows at the end may be skipped).
  //
  template <class PROCESS_8_VALUES_FN>
  static int VisitNulls_avx2(const RowTableImpl& rows, int column_id, int num_rows,
                             const uint32_t* row_ids,
                             PROCESS_8_VALUES_FN process_8_values_fn);
#endif
};
|
||||
|
||||
// A growable collection of rows kept in Arrow's row-oriented format
// (RowTableImpl), together with the encoder used to translate between that
// format and columnar exec batches.
//
// Write operations (appending batch rows) must not be called by more than one
// thread at the same time.
//
// Read operations (row comparison, column decoding)
// can be called by multiple threads concurrently.
//
struct RowArray {
  RowArray() : is_initialized_(false) {}

  // Set up internal state from the schema of `batch` / the given row metadata.
  // Presumably idempotent once initialized (tracked by is_initialized_) —
  // implementation not visible in this header.
  Status InitIfNeeded(MemoryPool* pool, const ExecBatch& batch);
  Status InitIfNeeded(MemoryPool* pool, const RowTableMetadata& row_metadata);

  // Append the rows of `batch` identified by `row_ids` to this array.
  Status AppendBatchSelection(MemoryPool* pool, const ExecBatch& batch, int begin_row_id,
                              int end_row_id, int num_row_ids, const uint16_t* row_ids,
                              std::vector<KeyColumnArray>& temp_column_arrays);

  // Compare stored rows against rows of `batch`, reporting mismatches either
  // as a selection vector or as a bit-vector.
  // This can only be called for a minibatch.
  //
  void Compare(const ExecBatch& batch, int begin_row_id, int end_row_id, int num_selected,
               const uint16_t* batch_selection_maybe_null, const uint32_t* array_row_ids,
               uint32_t* out_num_not_equal, uint16_t* out_not_equal_selection,
               int64_t hardware_flags, util::TempVectorStack* temp_stack,
               std::vector<KeyColumnArray>& temp_column_arrays,
               uint8_t* out_match_bitvector_maybe_null = NULLPTR);

  // Decode the selected rows of one column back into columnar form.
  // TODO: add AVX2 version
  //
  Status DecodeSelected(ResizableArrayData* target, int column_id, int num_rows_to_append,
                        const uint32_t* row_ids, MemoryPool* pool) const;

  // Debugging aid only.
  void DebugPrintToFile(const char* filename, bool print_sorted) const;

  // Number of rows stored so far; zero before initialization.
  int64_t num_rows() const { return is_initialized_ ? rows_.length() : 0; }

  // Set after the first successful InitIfNeeded call.
  bool is_initialized_;
  // Translates between columnar batches and the row format (by role; the
  // exact usage lives in the implementation file).
  RowTableEncoder encoder_;
  // Primary row storage.
  RowTableImpl rows_;
  // Scratch row table — presumably a staging area for appends; confirm
  // against the implementation.
  RowTableImpl rows_temp_;
};
|
||||
|
||||
// Implements concatenating multiple row arrays into a single one, using
// potentially multiple threads, each processing a single input row array.
//
// All members are static: the class is a namespace for the steps of the merge.
//
class RowArrayMerge {
 public:
  // Calculate total number of rows and size in bytes for merged sequence of
  // rows and allocate memory for it.
  //
  // If the rows are of varying length, initialize in the offset array the first
  // entry for the write area for each input row array. Leave all other
  // offsets and buffers uninitialized.
  //
  // All input sources must be initialized, but they can contain zero rows.
  //
  // Output in vector the first target row id for each source (exclusive
  // cummulative sum of number of rows in sources). This output is optional,
  // caller can pass in nullptr to indicate that it is not needed.
  //
  static Status PrepareForMerge(RowArray* target, const std::vector<RowArray*>& sources,
                                std::vector<int64_t>* first_target_row_id,
                                MemoryPool* pool);

  // Copy rows from source array to target array.
  // Both arrays must have the same row metadata.
  // Target array must already have the memory reserved in all internal buffers
  // for the copy of the rows.
  //
  // Copy of the rows will occupy the same amount of space in the target array
  // buffers as in the source array, but in the target array we pick at what row
  // position and offset we start writing.
  //
  // Optionally, the rows may be reordered during copy according to the
  // provided permutation, which represents some sorting order of source rows.
  // Nth element of the permutation array is the source row index for the Nth
  // row written into target array. If permutation is missing (null), then the
  // order of source rows will remain unchanged.
  //
  // In case of varying length rows, we purposefully skip outputting of N+1 (one
  // after last) offset, to allow concurrent copies of rows done to adjacent
  // ranges in the target array. This offset should already contain the right
  // value after calling the method preparing target array for merge (which
  // initializes boundary offsets for target row ranges for each source).
  //
  static void MergeSingle(RowArray* target, const RowArray& source,
                          int64_t first_target_row_id,
                          const int64_t* source_rows_permutation);

 private:
  // Copy rows from source array to a region of the target array.
  // This implementation is for fixed length rows.
  // Null information needs to be handled separately.
  //
  static void CopyFixedLength(RowTableImpl* target, const RowTableImpl& source,
                              int64_t first_target_row_id,
                              const int64_t* source_rows_permutation);

  // Copy rows from source array to a region of the target array.
  // This implementation is for varying length rows.
  // Null information needs to be handled separately.
  //
  static void CopyVaryingLength(RowTableImpl* target, const RowTableImpl& source,
                                int64_t first_target_row_id,
                                int64_t first_target_row_offset,
                                const int64_t* source_rows_permutation);

  // Copy null information from rows from source array to a region of the target
  // array.
  //
  static void CopyNulls(RowTableImpl* target, const RowTableImpl& source,
                        int64_t first_target_row_id,
                        const int64_t* source_rows_permutation);
};
|
||||
|
||||
// Implements merging of multiple SwissTables into a single one, using
// potentially multiple threads, each processing a single input source.
//
// Each source should correspond to a range of original hashes.
// A row belongs to a source with index determined by K highest bits of
// original hash. That means that the number of sources must be a power of 2.
//
// We assume that the hash values used and stored inside source tables
// have K highest bits removed from the original hash in order to avoid huge
// number of hash collisions that would occur otherwise.
// These bits will be reinserted back (original hashes will be used) when
// merging into target.
//
// All members are static: the class is a namespace for the steps of the merge.
//
class SwissTableMerge {
 public:
  // Calculate total number of blocks for merged table.
  // Allocate buffers sized accordingly and initialize empty target table.
  //
  // All input sources must be initialized, but they can be empty.
  //
  // Output in a vector the first target group id for each source (exclusive
  // cummulative sum of number of groups in sources). This output is optional,
  // caller can pass in nullptr to indicate that it is not needed.
  //
  static Status PrepareForMerge(SwissTable* target,
                                const std::vector<SwissTable*>& sources,
                                std::vector<uint32_t>* first_target_group_id,
                                MemoryPool* pool);

  // Copy all entries from source to a range of blocks (partition) of target.
  //
  // During copy, adjust group ids from source by adding provided base id.
  //
  // Skip entries from source that would cross partition boundaries (range of
  // blocks) when inserted into target. Save their data in output vector for
  // processing later. We postpone inserting these overflow entries in order to
  // allow concurrent processing of all partitions. Overflow entries will be
  // handled by a single-thread afterwards.
  //
  static void MergePartition(SwissTable* target, const SwissTable* source,
                             uint32_t partition_id, int num_partition_bits,
                             uint32_t base_group_id,
                             std::vector<uint32_t>* overflow_group_ids,
                             std::vector<uint32_t>* overflow_hashes);

  // Single-threaded processing of remaining groups, that could not be
  // inserted in partition merge phase
  // (due to entries from one partition spilling over due to full blocks into
  // the next partition).
  //
  static void InsertNewGroups(SwissTable* target, const std::vector<uint32_t>& group_ids,
                              const std::vector<uint32_t>& hashes);

 private:
  // Insert a new group id.
  //
  // Assumes that there are enough slots in the target
  // and there is no need to resize it.
  //
  // Max block id can be provided, in which case the search for an empty slot to
  // insert new entry to will stop after visiting that block.
  //
  // Max block id value greater or equal to the number of blocks guarantees that
  // the search will not be stopped.
  //
  static inline bool InsertNewGroup(SwissTable* target, uint64_t group_id, uint32_t hash,
                                    int64_t max_block_id);
};
|
||||
|
||||
// Pairs a SwissTable (hash-to-group-id index) with a RowArray holding the
// unique key rows, so that key lookup and key storage stay in sync.
struct SwissTableWithKeys {
  // Describes one unit of input rows to hash/map: either a [start, end) window
  // of an exec batch, or an explicit row selection, plus per-thread scratch
  // buffers.
  struct Input {
    Input(const ExecBatch* in_batch, int in_batch_start_row, int in_batch_end_row,
          util::TempVectorStack* in_temp_stack,
          std::vector<KeyColumnArray>* in_temp_column_arrays);

    Input(const ExecBatch* in_batch, util::TempVectorStack* in_temp_stack,
          std::vector<KeyColumnArray>* in_temp_column_arrays);

    Input(const ExecBatch* in_batch, int in_num_selected, const uint16_t* in_selection,
          util::TempVectorStack* in_temp_stack,
          std::vector<KeyColumnArray>* in_temp_column_arrays,
          std::vector<uint32_t>* in_temp_group_ids);

    // Narrow an existing input to a sub-range of its rows.
    Input(const Input& base, int num_rows_to_skip, int num_rows_to_include);

    const ExecBatch* batch;
    // Window of the batch to operate on.
    // The window information is only used if row selection is null.
    //
    int batch_start_row;
    int batch_end_row;
    // Optional selection.
    // Used instead of window of the batch if not null.
    //
    int num_selected;
    const uint16_t* selection_maybe_null;
    // Thread specific scratch buffers for storing temporary data.
    //
    util::TempVectorStack* temp_stack;
    std::vector<KeyColumnArray>* temp_column_arrays;
    std::vector<uint32_t>* temp_group_ids;
  };

  Status Init(int64_t hardware_flags, MemoryPool* pool);

  // Presumably wires up equal_impl_/append_impl_ below — implementation not
  // visible in this header.
  void InitCallbacks();

  // Compute hashes for the rows described by `input`.
  static void Hash(Input* input, uint32_t* hashes, int64_t hardware_flags);

  // If input uses selection, then hashes array must have one element for every
  // row in the whole (unfiltered and not spliced) input exec batch. Otherwise,
  // there must be one element in hashes array for every value in the window of
  // the exec batch specified by input.
  //
  // Output arrays will contain one element for every selected batch row in
  // input (selected either by selection vector if provided or input window
  // otherwise).
  //
  void MapReadOnly(Input* input, const uint32_t* hashes, uint8_t* match_bitvector,
                   uint32_t* key_ids);
  Status MapWithInserts(Input* input, const uint32_t* hashes, uint32_t* key_ids);

  SwissTable* swiss_table() { return &swiss_table_; }
  const SwissTable* swiss_table() const { return &swiss_table_; }
  RowArray* keys() { return &keys_; }
  const RowArray* keys() const { return &keys_; }

 private:
  // Callbacks handed to the SwissTable for comparing candidate keys and for
  // appending newly inserted keys (see EqualImpl/AppendImpl members below).
  void EqualCallback(int num_keys, const uint16_t* selection_maybe_null,
                     const uint32_t* group_ids, uint32_t* out_num_keys_mismatch,
                     uint16_t* out_selection_mismatch, void* callback_ctx);
  Status AppendCallback(int num_keys, const uint16_t* selection, void* callback_ctx);
  // Shared implementation behind MapReadOnly (insert_missing = false) and
  // MapWithInserts (insert_missing = true).
  Status Map(Input* input, bool insert_missing, const uint32_t* hashes,
             uint8_t* match_bitvector_maybe_null, uint32_t* key_ids);

  SwissTable::EqualImpl equal_impl_;
  SwissTable::AppendImpl append_impl_;

  SwissTable swiss_table_;
  RowArray keys_;
};
|
||||
|
||||
// Enhances SwissTableWithKeys with the following structures used by hash join:
// - storage of payloads (that unlike keys do not have to be unique)
// - mapping from a key to all inserted payloads corresponding to it (we can
// store multiple rows corresponding to a single key)
// - bit-vectors for keeping track of whether each payload had a match during
// evaluation of join.
//
class SwissTableForJoin {
  friend class SwissTableForJoinBuild;

 public:
  // Mark the given keys as matched in this thread's local bit-vector.
  void UpdateHasMatchForKeys(int64_t thread_id, int num_rows, const uint32_t* key_ids);
  // Combine the per-thread has-match bit-vectors into has_match_.
  void MergeHasMatch();

  const SwissTableWithKeys* keys() const { return &map_; }
  SwissTableWithKeys* keys() { return &map_; }
  const RowArray* payloads() const { return no_payload_columns_ ? NULLPTR : &payloads_; }
  // Offsets into the payload rows for each key, or null when keys are unique
  // (payload id == key id in that case).
  const uint32_t* key_to_payload() const {
    return no_duplicate_keys_ ? NULLPTR : row_offset_for_key_.data();
  }
  const uint8_t* has_match() const {
    return has_match_.empty() ? NULLPTR : has_match_.data();
  }
  int64_t num_keys() const { return map_.keys()->num_rows(); }
  int64_t num_rows() const {
    return no_duplicate_keys_ ? num_keys() : row_offset_for_key_[num_keys()];
  }

  uint32_t payload_id_to_key_id(uint32_t payload_id) const;
  // Input payload ids must form an increasing sequence.
  //
  void payload_ids_to_key_ids(int num_rows, const uint32_t* payload_ids,
                              uint32_t* key_ids) const;

 private:
  uint8_t* local_has_match(int64_t thread_id);

  // Degree of parallelism (number of threads)
  int dop_;

  struct ThreadLocalState {
    std::vector<uint8_t> has_match;
  };
  std::vector<ThreadLocalState> local_states_;
  // Merged has-match bit-vector (see MergeHasMatch).
  std::vector<uint8_t> has_match_;

  SwissTableWithKeys map_;

  bool no_duplicate_keys_;
  // Not used if no_duplicate_keys_ is true.
  // Holds num_keys() + 1 entries; the last entry is the total number of rows
  // (see num_rows() above).
  std::vector<uint32_t> row_offset_for_key_;

  bool no_payload_columns_;
  // Not used if no_payload_columns_ is true.
  RowArray payloads_;
};
|
||||
|
||||
// Implements parallel build process for hash table for join from a sequence of
// exec batches with input rows.
//
class SwissTableForJoinBuild {
 public:
  Status Init(SwissTableForJoin* target, int dop, int64_t num_rows,
              bool reject_duplicate_keys, bool no_payload,
              const std::vector<KeyColumnMetadata>& key_types,
              const std::vector<KeyColumnMetadata>& payload_types, MemoryPool* pool,
              int64_t hardware_flags);

  // In the first phase of parallel hash table build, threads pick unprocessed
  // exec batches, partition the rows based on hash, and update all of the
  // partitions with information related to that batch of rows.
  //
  Status PushNextBatch(int64_t thread_id, const ExecBatch& key_batch,
                       const ExecBatch* payload_batch_maybe_null,
                       util::TempVectorStack* temp_stack);

  // Allocate memory and initialize counters required for parallel merging of
  // hash table partitions.
  // Single-threaded.
  //
  Status PreparePrtnMerge();

  // Second phase of parallel hash table build.
  // Each partition can be processed by a different thread.
  // Parallel step.
  //
  void PrtnMerge(int prtn_id);

  // Single-threaded processing of the rows that have been skipped during
  // parallel merging phase, due to hash table search resulting in crossing
  // partition boundaries.
  //
  void FinishPrtnMerge(util::TempVectorStack* temp_stack);

  // The number of partitions is the number of parallel tasks to execute during
  // the final phase of hash table build process.
  //
  int num_prtns() const { return num_prtns_; }

  bool no_payload() const { return no_payload_; }

 private:
  void InitRowArray();
  // Per-partition part of PushNextBatch: apply one batch's rows to the state
  // of a single partition.
  Status ProcessPartition(int64_t thread_id, const ExecBatch& key_batch,
                          const ExecBatch* payload_batch_maybe_null,
                          util::TempVectorStack* temp_stack, int prtn_id);

  // Hash table being built (not owned).
  SwissTableForJoin* target_;
  // DOP stands for Degree Of Parallelism - the maximum number of participating
  // threads.
  //
  int dop_;
  // Partition is a unit of parallel work.
  //
  // There must be power of 2 partitions (bits of hash will be used to
  // identify them).
  //
  // Pick number of partitions at least equal to the number of threads (degree
  // of parallelism).
  //
  int log_num_prtns_;
  int num_prtns_;
  int64_t num_rows_;
  // Left-semi and left-anti-semi joins do not need more than one copy of the
  // same key in the hash table.
  // This flag, if set, will result in filtering rows with duplicate keys before
  // inserting them into hash table.
  //
  // Since left-semi and left-anti-semi joins also do not need payload, when
  // this flag is set there also will not be any processing of payload.
  //
  bool reject_duplicate_keys_;
  // This flag, when set, will result in skipping any processing of the payload.
  //
  // The flag for rejecting duplicate keys (which should be set for left-semi
  // and left-anti joins), when set, will force this flag to also be set, but
  // other join flavors may set it to true as well if no payload columns are
  // needed for join output.
  //
  bool no_payload_;
  MemoryPool* pool_;
  int64_t hardware_flags_;

  // One per partition.
  //
  struct PartitionState {
    SwissTableWithKeys keys;
    RowArray payloads;
    std::vector<uint32_t> key_ids;
    std::vector<uint32_t> overflow_key_ids;
    std::vector<uint32_t> overflow_hashes;
  };

  // One per thread.
  //
  // Buffers for storing temporary intermediate results when processing input
  // batches.
  //
  struct ThreadState {
    std::vector<uint32_t> batch_hashes;
    std::vector<uint16_t> batch_prtn_ranges;
    std::vector<uint16_t> batch_prtn_row_ids;
    std::vector<int> temp_prtn_ids;
    std::vector<uint32_t> temp_group_ids;
    std::vector<KeyColumnArray> temp_column_arrays;
  };

  std::vector<PartitionState> prtn_states_;
  std::vector<ThreadState> thread_states_;
  PartitionLocks prtn_locks_;

  // First target row id of each partition's keys/payloads in the merged
  // output — presumably filled via RowArrayMerge::PrepareForMerge; confirm
  // against the implementation.
  std::vector<int64_t> partition_keys_first_row_id_;
  std::vector<int64_t> partition_payloads_first_row_id_;
};
|
||||
|
||||
// Accumulates and materializes output rows of a hash join: probe-side
// columns are buffered from input batches, while build-side columns are
// recorded as key/payload row ids and only decoded from the row arrays on
// Flush(). Output is produced in batches of bounded size.
//
// NOTE(review): thread-safety is not established by this declaration —
// callers appear to keep one instance per thread (see JoinProbeProcessor's
// per-thread materialize_ vector); confirm before sharing an instance.
//
class JoinResultMaterialize {
 public:
  // Set the memory pool and the projection maps describing probe/build
  // side schemas. Must be called before any Append*/Flush call.
  void Init(MemoryPool* pool, const HashJoinProjectionMaps* probe_schemas,
            const HashJoinProjectionMaps* build_schemas);

  // Provide the build-side row arrays that key_ids/payload_ids refer to.
  // If payload_id_same_as_key_id is true, payload rows are looked up using
  // key ids instead of a separate payload id array.
  void SetBuildSide(const RowArray* build_keys, const RowArray* build_payloads,
                    bool payload_id_same_as_key_id);

  // Input probe side batches should contain all key columns followed by all
  // payload columns.
  //
  // Appends up to num_rows_to_append probe-side rows (selected by row_ids);
  // *num_rows_appended reports how many actually fit before the internal
  // buffer reached its batch-size limit.
  Status AppendProbeOnly(const ExecBatch& key_and_payload, int num_rows_to_append,
                         const uint16_t* row_ids, int* num_rows_appended);

  // Appends build-side-only rows (probe columns become null). Null key_ids /
  // payload_ids pointers mean the corresponding rows are all-null on the
  // build side as well (tracked via null_ranges_).
  Status AppendBuildOnly(int num_rows_to_append, const uint32_t* key_ids,
                         const uint32_t* payload_ids, int* num_rows_appended);

  // Appends matched rows: probe-side rows selected by row_ids paired with
  // build-side rows identified by key_ids/payload_ids.
  Status Append(const ExecBatch& key_and_payload, int num_rows_to_append,
                const uint16_t* row_ids, const uint32_t* key_ids,
                const uint32_t* payload_ids, int* num_rows_appended);

  // Should only be called if num_rows() returns non-zero.
  //
  // Materializes all accumulated rows into a single output batch and resets
  // the accumulation state.
  Status Flush(ExecBatch* out);

  // Number of rows accumulated since the last flush.
  int num_rows() const { return num_rows_; }

  // Repeatedly invokes append_rows_fn, flushing a full output batch through
  // output_batch_fn whenever the accumulator fills up before all rows were
  // appended, until all num_rows_to_append rows have been consumed.
  template <class APPEND_ROWS_FN, class OUTPUT_BATCH_FN>
  Status AppendAndOutput(int num_rows_to_append, const APPEND_ROWS_FN& append_rows_fn,
                         const OUTPUT_BATCH_FN& output_batch_fn) {
    int offset = 0;
    for (;;) {
      int num_rows_appended = 0;
      ARROW_RETURN_NOT_OK(append_rows_fn(num_rows_to_append, offset, &num_rows_appended));
      if (num_rows_appended < num_rows_to_append) {
        // Accumulator filled up before consuming all rows: emit a batch and
        // continue with the remainder.
        ExecBatch batch;
        ARROW_RETURN_NOT_OK(Flush(&batch));
        output_batch_fn(batch);
        num_rows_to_append -= num_rows_appended;
        offset += num_rows_appended;
      } else {
        break;
      }
    }
    return Status::OK();
  }

  // Convenience overload: AppendProbeOnly that emits full batches through
  // output_batch_fn instead of reporting a partial append to the caller.
  template <class OUTPUT_BATCH_FN>
  Status AppendProbeOnly(const ExecBatch& key_and_payload, int num_rows_to_append,
                         const uint16_t* row_ids, OUTPUT_BATCH_FN output_batch_fn) {
    return AppendAndOutput(
        num_rows_to_append,
        [&](int num_rows_to_append_left, int offset, int* num_rows_appended) {
          return AppendProbeOnly(key_and_payload, num_rows_to_append_left,
                                 row_ids + offset, num_rows_appended);
        },
        output_batch_fn);
  }

  // Convenience overload: AppendBuildOnly that emits full batches through
  // output_batch_fn. Null id arrays are propagated as null (not offset).
  template <class OUTPUT_BATCH_FN>
  Status AppendBuildOnly(int num_rows_to_append, const uint32_t* key_ids,
                         const uint32_t* payload_ids, OUTPUT_BATCH_FN output_batch_fn) {
    return AppendAndOutput(
        num_rows_to_append,
        [&](int num_rows_to_append_left, int offset, int* num_rows_appended) {
          return AppendBuildOnly(
              num_rows_to_append_left, key_ids ? key_ids + offset : NULLPTR,
              payload_ids ? payload_ids + offset : NULLPTR, num_rows_appended);
        },
        output_batch_fn);
  }

  // Convenience overload: Append that emits full batches through
  // output_batch_fn.
  template <class OUTPUT_BATCH_FN>
  Status Append(const ExecBatch& key_and_payload, int num_rows_to_append,
                const uint16_t* row_ids, const uint32_t* key_ids,
                const uint32_t* payload_ids, OUTPUT_BATCH_FN output_batch_fn) {
    return AppendAndOutput(
        num_rows_to_append,
        [&](int num_rows_to_append_left, int offset, int* num_rows_appended) {
          return Append(key_and_payload, num_rows_to_append_left,
                        row_ids ? row_ids + offset : NULLPTR,
                        key_ids ? key_ids + offset : NULLPTR,
                        payload_ids ? payload_ids + offset : NULLPTR, num_rows_appended);
        },
        output_batch_fn);
  }

  // Flushes any remaining accumulated rows through output_batch_fn.
  // A no-op when nothing has been accumulated since the last flush.
  template <class OUTPUT_BATCH_FN>
  Status Flush(OUTPUT_BATCH_FN output_batch_fn) {
    if (num_rows_ > 0) {
      ExecBatch batch({}, num_rows_);
      ARROW_RETURN_NOT_OK(Flush(&batch));
      output_batch_fn(std::move(batch));
    }
    return Status::OK();
  }

  // Total number of output batches produced so far.
  int64_t num_produced_batches() const { return num_produced_batches_; }

 private:
  bool HasProbeOutput() const;
  bool HasBuildKeyOutput() const;
  bool HasBuildPayloadOutput() const;
  bool NeedsKeyId() const;
  bool NeedsPayloadId() const;
  // Decodes one build-side column (identified by column_id) from row_array
  // for the given row ids into a freshly allocated ArrayData.
  Result<std::shared_ptr<ArrayData>> FlushBuildColumn(
      const std::shared_ptr<DataType>& data_type, const RowArray* row_array,
      int column_id, uint32_t* row_ids);

  MemoryPool* pool_;
  const HashJoinProjectionMaps* probe_schemas_;
  const HashJoinProjectionMaps* build_schemas_;
  const RowArray* build_keys_;
  // Payload array pointer may be left as null, if no payload columns are
  // in the output column set.
  //
  const RowArray* build_payloads_;
  // If true, then ignore updating payload ids and use key ids instead when
  // reading.
  //
  bool payload_id_same_as_key_id_;
  // Maps probe-side output column positions to positions in the combined
  // key-then-payload input batch layout.
  std::vector<int> probe_output_to_key_and_payload_;

  // Number of accumulated rows (since last flush)
  //
  int num_rows_;
  // Accumulated output columns from probe side batches.
  //
  ExecBatchBuilder batch_builder_;
  // Accumulated build side row references.
  //
  std::vector<uint32_t> key_ids_;
  std::vector<uint32_t> payload_ids_;
  // Information about ranges of rows from build side,
  // that in the accumulated materialized results have all fields set to null.
  //
  // Each pair contains index of the first output row in the range and the
  // length of the range. Only rows outside of these ranges have data present in
  // the key_ids_ and payload_ids_ arrays.
  //
  std::vector<std::pair<int, int>> null_ranges_;

  int64_t num_produced_batches_;
};
|
||||
|
||||
// When comparing two join key values to check if they are equal, hash join allows to
// choose (even separately for each field within the join key) whether two null values
// are considered to be equal (IS comparison) or not (EQ comparison). For EQ comparison
// we need to filter rows with nulls in keys outside of hash table lookups, since the
// hash table implementation always treats two nulls as equal (like IS comparison).
//
// Implements evaluating a filter bit vector eliminating rows that cannot have
// join matches due to nulls in key columns.
//
class JoinNullFilter {
 public:
  // The batch for which the filter bit vector will be computed
  // needs to start with all key columns but it may contain more columns
  // (payload) following them.
  //
  // For each of num_batch_rows rows starting at batch_start_row, clears the
  // corresponding bit in out_bit_vector when a key column compared with EQ
  // semantics (per cmp) is null. If and_with_input is true the result is
  // AND-ed into the existing bit vector instead of overwriting it.
  // *all_valid is set to indicate whether every row passed the filter.
  static void Filter(const ExecBatch& key_batch, int batch_start_row, int num_batch_rows,
                     const std::vector<JoinKeyCmp>& cmp, bool* all_valid,
                     bool and_with_input, uint8_t* out_bit_vector);
};
|
||||
|
||||
// A helper class that takes hash table lookup results for a range of rows in an
// input batch, that is:
// - bit vector marking whether there was a key match in the hash table
// - key id if there was a match
// - mapping from key id to a range of payload ids associated with that key
//   (representing multiple matching rows in a hash table for a single row in an
//   input batch), and iterates output batches of limited size containing tuples
// describing all matching pairs of rows:
// - input batch row id (only rows that have matches in the hash table are
//   included)
// - key id for a match
// - payload id (different one for each matching row in the hash table)
//
class JoinMatchIterator {
 public:
  // Capture (borrow, not copy) the lookup results to iterate over.
  // key_to_payload maps a key id to the start of its payload id range;
  // it may be ignored when no_duplicate_keys is true (one payload per key).
  void SetLookupResult(int num_batch_rows, int start_batch_row,
                       const uint8_t* batch_has_match, const uint32_t* key_ids,
                       bool no_duplicate_keys, const uint32_t* key_to_payload);
  // Fill the output arrays with up to num_rows_max matching (row, key,
  // payload) tuples. Returns false when iteration is exhausted.
  bool GetNextBatch(int num_rows_max, int* out_num_rows, uint16_t* batch_row_ids,
                    uint32_t* key_ids, uint32_t* payload_ids);

 private:
  int num_batch_rows_;
  int start_batch_row_;
  // Borrowed pointers set by SetLookupResult; caller keeps them alive for
  // the duration of iteration.
  const uint8_t* batch_has_match_;
  const uint32_t* key_ids_;

  bool no_duplicate_keys_;
  const uint32_t* key_to_payload_;

  // Index of the first not fully processed input row, or number of rows if all
  // have been processed. May be pointing to a row with no matches.
  //
  int current_row_;
  // Index of the first unprocessed match for the input row. May be zero if the
  // row has no matches.
  //
  int current_match_for_row_;
};
|
||||
|
||||
// Implements entire processing of a probe side exec batch,
// provided the join hash table is already built and available.
//
class JoinProbeProcessor {
 public:
  // Callback used to emit a finished output batch; the int64_t argument is
  // the thread id of the producing thread.
  using OutputBatchFn = std::function<void(int64_t, ExecBatch)>;

  // materialize holds one JoinResultMaterialize per thread, so concurrent
  // OnNextBatch calls from different threads do not share accumulators.
  void Init(int num_key_columns, JoinType join_type, SwissTableForJoin* hash_table,
            std::vector<JoinResultMaterialize*> materialize,
            const std::vector<JoinKeyCmp>* cmp, OutputBatchFn output_batch_fn);
  // Probe the hash table with one batch (key columns first, then payload)
  // using the calling thread's scratch space.
  Status OnNextBatch(int64_t thread_id, const ExecBatch& keypayload_batch,
                     util::TempVectorStack* temp_stack,
                     std::vector<KeyColumnArray>* temp_column_arrays);

  // Must be called by a single-thread having exclusive access to the instance
  // of this class. The caller is responsible for ensuring that.
  //
  Status OnFinished();

 private:
  int num_key_columns_;
  JoinType join_type_;

  SwissTableForJoin* hash_table_;
  // One element per thread
  //
  std::vector<JoinResultMaterialize*> materialize_;
  const std::vector<JoinKeyCmp>* cmp_;
  OutputBatchFn output_batch_fn_;
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,100 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/status.h"
|
||||
#include "arrow/util/logging.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
// Atomic value surrounded by padding bytes to avoid cache line invalidation
// whenever it is modified by a concurrent thread on a different CPU core
// (false sharing between adjacent AtomicWithPadding instances in an array).
//
// NOTE(review): 64 is assumed to be the destructive interference size here;
// std::hardware_destructive_interference_size could replace the constant,
// but that would change the struct's layout/size — confirm before touching.
//
template <typename T>
class AtomicWithPadding {
 private:
  static constexpr int kCacheLineSize = 64;
  // Padding on both sides keeps `value` from sharing a cache line with
  // neighboring objects regardless of this object's placement.
  uint8_t padding_before[kCacheLineSize];

 public:
  std::atomic<T> value;

 private:
  uint8_t padding_after[kCacheLineSize];
};
|
||||
|
||||
// Used for asynchronous execution of operations that can be broken into
// a fixed number of symmetric tasks that can be executed concurrently.
//
// Implements priorities between multiple such operations, called task groups.
//
// Allows to specify the maximum number of in-flight tasks at any moment.
//
// Also allows for executing next pending tasks immediately using a caller thread.
//
class ARROW_EXPORT TaskScheduler {
 public:
  // A task: (thread_id, task_index) -> Status.
  using TaskImpl = std::function<Status(size_t, int64_t)>;
  // Continuation invoked (with thread_id) when all tasks of a group finish.
  using TaskGroupContinuationImpl = std::function<Status(size_t)>;
  // Callback used by the scheduler to hand a unit of work to an executor.
  using ScheduleImpl = std::function<Status(TaskGroupContinuationImpl)>;
  using AbortContinuationImpl = std::function<void()>;

  virtual ~TaskScheduler() = default;

  // Order in which task groups are registered represents priorities of their tasks
  // (the first group has the highest priority).
  //
  // Returns task group identifier that is used to request operations on the task group.
  virtual int RegisterTaskGroup(TaskImpl task_impl,
                                TaskGroupContinuationImpl cont_impl) = 0;

  // Must be called once after all task groups have been registered and
  // before any of them is started.
  virtual void RegisterEnd() = 0;

  // total_num_tasks may be zero, in which case task group continuation will be executed
  // immediately
  virtual Status StartTaskGroup(size_t thread_id, int group_id,
                                int64_t total_num_tasks) = 0;

  // Execute given number of tasks immediately using caller thread
  // (or all remaining tasks, when execute_all is true).
  virtual Status ExecuteMore(size_t thread_id, int num_tasks_to_execute,
                             bool execute_all) = 0;

  // Begin scheduling tasks using provided callback and
  // the limit on the number of in-flight tasks at any moment.
  //
  // Scheduling will continue as long as there are waiting tasks.
  //
  // It will automatically resume whenever new task group gets started.
  virtual Status StartScheduling(size_t thread_id, ScheduleImpl schedule_impl,
                                 int num_concurrent_tasks, bool use_sync_execution) = 0;

  // Abort scheduling and execution.
  // Used in case of being notified about unrecoverable error for the entire query.
  virtual void Abort(AbortContinuationImpl impl) = 0;

  // Factory for the concrete implementation.
  static std::unique_ptr<TaskScheduler> Make();
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,212 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <arrow/testing/gtest_util.h>
|
||||
#include <arrow/util/vector.h>
|
||||
|
||||
#include <functional>
|
||||
#include <random>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/exec.h"
|
||||
#include "arrow/compute/exec/exec_plan.h"
|
||||
#include "arrow/compute/kernel.h"
|
||||
#include "arrow/testing/visibility.h"
|
||||
#include "arrow/util/async_generator.h"
|
||||
#include "arrow/util/pcg_random.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
using StartProducingFunc = std::function<Status(ExecNode*)>;
|
||||
using StopProducingFunc = std::function<void(ExecNode*)>;
|
||||
|
||||
// Make a dummy node that has no execution behaviour
|
||||
ARROW_TESTING_EXPORT
|
||||
ExecNode* MakeDummyNode(ExecPlan* plan, std::string label, std::vector<ExecNode*> inputs,
|
||||
int num_outputs, StartProducingFunc = {}, StopProducingFunc = {});
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
ExecBatch ExecBatchFromJSON(const std::vector<TypeHolder>& types, std::string_view json);
|
||||
|
||||
/// \brief Shape qualifier for value types. In certain instances
|
||||
/// (e.g. "map_lookup" kernel), an argument may only be a scalar, where in
|
||||
/// other kernels arguments can be arrays or scalars
|
||||
enum class ArgShape { ANY, ARRAY, SCALAR };
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
ExecBatch ExecBatchFromJSON(const std::vector<TypeHolder>& types,
|
||||
const std::vector<ArgShape>& shapes, std::string_view json);
|
||||
|
||||
// A set of exec batches together with the schema they conform to; test
// helper for feeding data into exec plans.
struct BatchesWithSchema {
  std::vector<ExecBatch> batches;
  std::shared_ptr<Schema> schema;

  // Build an async generator yielding the batches in order.
  // parallel: produce batches from the CPU thread pool (emulating a scan);
  // slow: insert a small sleep before each batch to exercise backpressure /
  // ordering behavior in consumers.
  AsyncGenerator<std::optional<ExecBatch>> gen(bool parallel, bool slow) const {
    // Wrap each batch in std::optional, since the generator protocol uses
    // an empty optional as the end-of-stream marker.
    auto opt_batches = ::arrow::internal::MapVector(
        [](ExecBatch batch) { return std::make_optional(std::move(batch)); }, batches);

    AsyncGenerator<std::optional<ExecBatch>> gen;

    if (parallel) {
      // emulate batches completing initial decode-after-scan on a cpu thread
      gen = MakeBackgroundGenerator(MakeVectorIterator(std::move(opt_batches)),
                                    ::arrow::internal::GetCpuThreadPool())
                .ValueOrDie();

      // ensure that callbacks are not executed immediately on a background thread
      gen =
          MakeTransferredGenerator(std::move(gen), ::arrow::internal::GetCpuThreadPool());
    } else {
      gen = MakeVectorGenerator(std::move(opt_batches));
    }

    if (slow) {
      gen =
          MakeMappedGenerator(std::move(gen), [](const std::optional<ExecBatch>& batch) {
            SleepABit();
            return batch;
          });
    }

    return gen;
  }
};
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
Future<> StartAndFinish(ExecPlan* plan);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
Future<std::vector<ExecBatch>> StartAndCollect(
|
||||
ExecPlan* plan, AsyncGenerator<std::optional<ExecBatch>> gen);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
BatchesWithSchema MakeBasicBatches();
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
BatchesWithSchema MakeNestedBatches();
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
BatchesWithSchema MakeRandomBatches(const std::shared_ptr<Schema>& schema,
|
||||
int num_batches = 10, int batch_size = 4,
|
||||
int64_t alignment = kDefaultBufferAlignment,
|
||||
MemoryPool* memory_pool = nullptr);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
BatchesWithSchema MakeBatchesFromString(const std::shared_ptr<Schema>& schema,
|
||||
const std::vector<std::string_view>& json_strings,
|
||||
int multiplicity = 1);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
Result<std::vector<std::shared_ptr<ArrayVector>>> ToArrayVectors(
|
||||
const BatchesWithSchema& batches_with_schema);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
Result<std::vector<std::shared_ptr<ExecBatch>>> ToExecBatches(
|
||||
const BatchesWithSchema& batches);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
Result<std::vector<std::shared_ptr<RecordBatch>>> ToRecordBatches(
|
||||
const BatchesWithSchema& batches);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
Result<std::shared_ptr<RecordBatchReader>> ToRecordBatchReader(
|
||||
const BatchesWithSchema& batches_with_schema);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
Result<std::vector<std::shared_ptr<ArrayVector>>> ToArrayVectors(
|
||||
const BatchesWithSchema& batches_with_schema);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
Result<std::vector<std::shared_ptr<ExecBatch>>> ToExecBatches(
|
||||
const BatchesWithSchema& batches);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
Result<std::vector<std::shared_ptr<RecordBatch>>> ToRecordBatches(
|
||||
const BatchesWithSchema& batches);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
Result<std::shared_ptr<Table>> SortTableOnAllFields(const std::shared_ptr<Table>& tab);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
void AssertTablesEqualIgnoringOrder(const std::shared_ptr<Table>& exp,
|
||||
const std::shared_ptr<Table>& act);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
void AssertExecBatchesEqualIgnoringOrder(const std::shared_ptr<Schema>& schema,
|
||||
const std::vector<ExecBatch>& exp,
|
||||
const std::vector<ExecBatch>& act);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
bool operator==(const Declaration&, const Declaration&);
|
||||
|
||||
ARROW_TESTING_EXPORT
|
||||
void PrintTo(const Declaration& decl, std::ostream* os);
|
||||
|
||||
class Random64Bit {
|
||||
public:
|
||||
explicit Random64Bit(int32_t seed) : rng_(seed) {}
|
||||
uint64_t next() { return dist_(rng_); }
|
||||
template <typename T>
|
||||
inline T from_range(const T& min_val, const T& max_val) {
|
||||
return static_cast<T>(min_val + (next() % (max_val - min_val + 1)));
|
||||
}
|
||||
|
||||
private:
|
||||
random::pcg32_fast rng_;
|
||||
std::uniform_int_distribution<uint64_t> dist_;
|
||||
};
|
||||
|
||||
/// Specify properties of a table to be generated.
/// Consumed by MakeRandomTimeSeriesTable; see its declaration for the
/// resulting schema.
struct TableGenerationProperties {
  /// Indicates the amount of time between data points that lie between
  /// the start and end parameters.
  int time_frequency;
  /// The number of additional random columns in the table.
  int num_columns;
  /// The number of unique keys in the table.
  int num_ids;
  /// Specifies the prefix of each randomly generated column.
  std::string column_prefix;
  /// Specifies the minimum value in the randomly generated column(s).
  int min_column_value;
  /// Specifies the maximum value in the randomly generated column(s).
  int max_column_value;
  /// The random seed the random array generator is given to generate the additional
  /// columns.
  int seed;
  /// Specifies the beginning of 'time' recorded in the table, inclusive.
  int start;
  /// Specifies the end of 'time' recorded in the table, inclusive.
  int end;
};
|
||||
|
||||
/// The table generated in accordance to the TableGenerationProperties has the following
|
||||
/// schema: time (int64) id (int32) [properties.column_prefix]{idx} (float64)
|
||||
/// where idx is in [0, properties.num_columns)
|
||||
/// Each id has rows corresponding to a singular data point in the time range (start, end,
|
||||
/// time_frequency). The table is sorted by time.
|
||||
ARROW_TESTING_EXPORT
|
||||
Result<std::shared_ptr<Table>> MakeRandomTimeSeriesTable(
|
||||
const TableGenerationProperties& properties);
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,64 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/type_fwd.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/status.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
namespace internal {
|
||||
|
||||
class ARROW_EXPORT TpchGen {
 public:
  virtual ~TpchGen() = default;

  /**
   * \brief Create a factory for nodes that generate TPC-H data
   *
   * Note: Individual tables will reference each other. It is important that you only
   * create a single TpchGen instance for each plan and then you can create nodes for
   * each table from that single TpchGen instance. Note: Every batch will be scheduled
   * as a new task using the ExecPlan's scheduler.
   */
  static Result<std::unique_ptr<TpchGen>> Make(
      ExecPlan* plan, double scale_factor = 1.0, int64_t batch_size = 4096,
      std::optional<int64_t> seed = std::nullopt);

  // The below methods will create and add an ExecNode to the plan that generates
  // data for the desired table. If columns is empty, all columns will be generated.
  // The methods return the added ExecNode, which should be used for inputs.
  virtual Result<ExecNode*> Supplier(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Part(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> PartSupp(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Customer(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Orders(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Lineitem(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Nation(std::vector<std::string> columns = {}) = 0;
  virtual Result<ExecNode*> Region(std::vector<std::string> columns = {}) = 0;
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,428 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <thread>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/buffer.h"
|
||||
#include "arrow/compute/exec/expression.h"
|
||||
#include "arrow/compute/exec/options.h"
|
||||
#include "arrow/compute/type_fwd.h"
|
||||
#include "arrow/memory_pool.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/status.h"
|
||||
#include "arrow/util/bit_util.h"
|
||||
#include "arrow/util/cpu_info.h"
|
||||
#include "arrow/util/logging.h"
|
||||
#include "arrow/util/mutex.h"
|
||||
#include "arrow/util/thread_pool.h"
|
||||
|
||||
#if defined(__clang__) || defined(__GNUC__)
|
||||
#define BYTESWAP(x) __builtin_bswap64(x)
|
||||
#define ROTL(x, n) (((x) << (n)) | ((x) >> ((-n) & 31)))
|
||||
#define ROTL64(x, n) (((x) << (n)) | ((x) >> ((-n) & 63)))
|
||||
#define PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
|
||||
#elif defined(_MSC_VER)
|
||||
#include <intrin.h>
|
||||
#define BYTESWAP(x) _byteswap_uint64(x)
|
||||
#define ROTL(x, n) _rotl((x), (n))
|
||||
#define ROTL64(x, n) _rotl64((x), (n))
|
||||
#if defined(_M_X64) || defined(_M_I86)
|
||||
#include <mmintrin.h> // https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx
|
||||
#define PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
|
||||
#else
|
||||
#define PREFETCH(ptr) (void)(ptr) /* disabled */
|
||||
#endif
|
||||
#endif
|
||||
|
||||
namespace arrow {
|
||||
namespace util {
|
||||
|
||||
template <typename T>
|
||||
inline void CheckAlignment(const void* ptr) {
|
||||
ARROW_DCHECK(reinterpret_cast<uint64_t>(ptr) % sizeof(T) == 0);
|
||||
}
|
||||
|
||||
// Some platforms typedef int64_t as long int instead of long long int,
|
||||
// which breaks the _mm256_i64gather_epi64 and _mm256_i32gather_epi64 intrinsics
|
||||
// which need long long.
|
||||
// We use the cast to the type below in these intrinsics to make the code
|
||||
// compile in all cases.
|
||||
//
|
||||
using int64_for_gather_t = const long long int; // NOLINT runtime-int
|
||||
|
||||
// All MiniBatch... classes use TempVectorStack for vector allocations and can
// only work with vectors up to 1024 elements.
//
// They should only be allocated on the stack to guarantee the right sequence
// of allocation and deallocation of vectors from TempVectorStack.
//
class MiniBatch {
 public:
  // log2 of the mini-batch length; kept as a constant so callers can shift
  // instead of divide when splitting work into mini batches.
  static constexpr int kLogMiniBatchLength = 10;
  static constexpr int kMiniBatchLength = 1 << kLogMiniBatchLength;
};
|
||||
|
||||
/// Storage used to allocate temporary vectors of a batch size.
|
||||
/// Temporary vectors should resemble allocating temporary variables on the stack
|
||||
/// but in the context of vectorized processing where we need to store a vector of
|
||||
/// temporaries instead of a single value.
|
||||
class TempVectorStack {
|
||||
template <typename>
|
||||
friend class TempVectorHolder;
|
||||
|
||||
public:
|
||||
Status Init(MemoryPool* pool, int64_t size) {
|
||||
num_vectors_ = 0;
|
||||
top_ = 0;
|
||||
buffer_size_ = PaddedAllocationSize(size) + kPadding + 2 * sizeof(uint64_t);
|
||||
ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateResizableBuffer(size, pool));
|
||||
// Ensure later operations don't accidentally read uninitialized memory.
|
||||
std::memset(buffer->mutable_data(), 0xFF, size);
|
||||
buffer_ = std::move(buffer);
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
private:
|
||||
int64_t PaddedAllocationSize(int64_t num_bytes) {
|
||||
// Round up allocation size to multiple of 8 bytes
|
||||
// to avoid returning temp vectors with unaligned address.
|
||||
//
|
||||
// Also add padding at the end to facilitate loads and stores
|
||||
// using SIMD when number of vector elements is not divisible
|
||||
// by the number of SIMD lanes.
|
||||
//
|
||||
return ::arrow::bit_util::RoundUp(num_bytes, sizeof(int64_t)) + kPadding;
|
||||
}
|
||||
void alloc(uint32_t num_bytes, uint8_t** data, int* id) {
|
||||
int64_t old_top = top_;
|
||||
top_ += PaddedAllocationSize(num_bytes) + 2 * sizeof(uint64_t);
|
||||
// Stack overflow check
|
||||
ARROW_DCHECK(top_ <= buffer_size_);
|
||||
*data = buffer_->mutable_data() + old_top + sizeof(uint64_t);
|
||||
// We set 8 bytes before the beginning of the allocated range and
|
||||
// 8 bytes after the end to check for stack overflow (which would
|
||||
// result in those known bytes being corrupted).
|
||||
reinterpret_cast<uint64_t*>(buffer_->mutable_data() + old_top)[0] = kGuard1;
|
||||
reinterpret_cast<uint64_t*>(buffer_->mutable_data() + top_)[-1] = kGuard2;
|
||||
*id = num_vectors_++;
|
||||
}
|
||||
void release(int id, uint32_t num_bytes) {
|
||||
ARROW_DCHECK(num_vectors_ == id + 1);
|
||||
int64_t size = PaddedAllocationSize(num_bytes) + 2 * sizeof(uint64_t);
|
||||
ARROW_DCHECK(reinterpret_cast<const uint64_t*>(buffer_->mutable_data() + top_)[-1] ==
|
||||
kGuard2);
|
||||
ARROW_DCHECK(top_ >= size);
|
||||
top_ -= size;
|
||||
ARROW_DCHECK(reinterpret_cast<const uint64_t*>(buffer_->mutable_data() + top_)[0] ==
|
||||
kGuard1);
|
||||
--num_vectors_;
|
||||
}
|
||||
static constexpr uint64_t kGuard1 = 0x3141592653589793ULL;
|
||||
static constexpr uint64_t kGuard2 = 0x0577215664901532ULL;
|
||||
static constexpr int64_t kPadding = 64;
|
||||
int num_vectors_;
|
||||
int64_t top_;
|
||||
std::unique_ptr<Buffer> buffer_;
|
||||
int64_t buffer_size_;
|
||||
};
|
||||
|
||||
// RAII owner of one allocation from a TempVectorStack: the constructor
// allocates num_elements * sizeof(T) bytes and the destructor releases
// them. Because TempVectorStack frees in LIFO order, holders must be
// destroyed in reverse order of construction (natural with scoped locals).
template <typename T>
class TempVectorHolder {
  friend class TempVectorStack;

 public:
  // Return the allocation to the stack (checked LIFO in release()).
  ~TempVectorHolder() { stack_->release(id_, num_elements_ * sizeof(T)); }
  // Typed view of the allocated bytes. Contents are uninitialized.
  T* mutable_data() { return reinterpret_cast<T*>(data_); }
  // Allocate space for `num_elements` elements of T from `stack`, which
  // must outlive this holder.
  TempVectorHolder(TempVectorStack* stack, uint32_t num_elements) {
    stack_ = stack;
    num_elements_ = num_elements;
    stack_->alloc(num_elements * sizeof(T), &data_, &id_);
  }

 private:
  TempVectorStack* stack_;   // non-owning; source of the allocation
  uint8_t* data_;            // start of the usable payload
  int id_;                   // allocation id used to enforce LIFO release
  uint32_t num_elements_;    // element count (byte size recomputed on release)
};
|
||||
|
||||
// Bit-manipulation helpers operating on packed bitmaps. Each public entry
// point takes `hardware_flags`; the ARROW_HAVE_AVX2 section below suggests
// these flags select an AVX2 fast path when available — confirm against the
// .cc implementation. `bit_offset` is the bit position within the first
// byte at which the bitmap logically starts.
class bit_util {
 public:
  // Collect into `indexes` the positions (relative to bit_offset) of all
  // bits equal to `bit_to_search`; the count is stored in *num_indexes.
  static void bits_to_indexes(int bit_to_search, int64_t hardware_flags,
                              const int num_bits, const uint8_t* bits, int* num_indexes,
                              uint16_t* indexes, int bit_offset = 0);

  // Keep only the entries of `input_indexes` whose corresponding bit equals
  // `bit_to_search`, writing survivors to `indexes`.
  static void bits_filter_indexes(int bit_to_search, int64_t hardware_flags,
                                  const int num_bits, const uint8_t* bits,
                                  const uint16_t* input_indexes, int* num_indexes,
                                  uint16_t* indexes, int bit_offset = 0);

  // Input and output indexes may be pointing to the same data (in-place filtering).
  // Partition bit positions into those with bit 0 and those with bit 1.
  static void bits_split_indexes(int64_t hardware_flags, const int num_bits,
                                 const uint8_t* bits, int* num_indexes_bit0,
                                 uint16_t* indexes_bit0, uint16_t* indexes_bit1,
                                 int bit_offset = 0);

  // Expand a bitmap to one byte per bit. Bit 1 is replaced with byte 0xFF.
  static void bits_to_bytes(int64_t hardware_flags, const int num_bits,
                            const uint8_t* bits, uint8_t* bytes, int bit_offset = 0);

  // Pack bytes back into a bitmap. Return highest bit of each byte.
  static void bytes_to_bits(int64_t hardware_flags, const int num_bits,
                            const uint8_t* bytes, uint8_t* bits, int bit_offset = 0);

  // True iff all of the first `num_bytes` bytes are zero.
  static bool are_all_bytes_zero(int64_t hardware_flags, const uint8_t* bytes,
                                 uint32_t num_bytes);

 private:
  // Load/store up to 8 bytes without reading/writing past the given range.
  inline static uint64_t SafeLoadUpTo8Bytes(const uint8_t* bytes, int num_bytes);
  inline static void SafeStoreUpTo8Bytes(uint8_t* bytes, int num_bytes, uint64_t value);
  // Scalar per-64-bit-word helpers used by the non-SIMD paths.
  inline static void bits_to_indexes_helper(uint64_t word, uint16_t base_index,
                                            int* num_indexes, uint16_t* indexes);
  inline static void bits_filter_indexes_helper(uint64_t word,
                                                const uint16_t* input_indexes,
                                                int* num_indexes, uint16_t* indexes);
  // Shared implementation for bits_to_indexes / bits_filter_indexes,
  // specialized at compile time on the searched bit and filtering mode.
  template <int bit_to_search, bool filter_input_indexes>
  static void bits_to_indexes_internal(int64_t hardware_flags, const int num_bits,
                                       const uint8_t* bits, const uint16_t* input_indexes,
                                       int* num_indexes, uint16_t* indexes,
                                       uint16_t base_index = 0);

#if defined(ARROW_HAVE_AVX2)
  // AVX2 variants of the public entry points (compiled only when available).
  static void bits_to_indexes_avx2(int bit_to_search, const int num_bits,
                                   const uint8_t* bits, int* num_indexes,
                                   uint16_t* indexes, uint16_t base_index = 0);
  static void bits_filter_indexes_avx2(int bit_to_search, const int num_bits,
                                       const uint8_t* bits, const uint16_t* input_indexes,
                                       int* num_indexes, uint16_t* indexes);
  template <int bit_to_search>
  static void bits_to_indexes_imp_avx2(const int num_bits, const uint8_t* bits,
                                       int* num_indexes, uint16_t* indexes,
                                       uint16_t base_index = 0);
  template <int bit_to_search>
  static void bits_filter_indexes_imp_avx2(const int num_bits, const uint8_t* bits,
                                           const uint16_t* input_indexes,
                                           int* num_indexes, uint16_t* indexes);
  static void bits_to_bytes_avx2(const int num_bits, const uint8_t* bits, uint8_t* bytes);
  static void bytes_to_bits_avx2(const int num_bits, const uint8_t* bytes, uint8_t* bits);
  static bool are_all_bytes_zero_avx2(const uint8_t* bytes, uint32_t num_bytes);
#endif
};
|
||||
|
||||
} // namespace util
|
||||
namespace compute {
|
||||
|
||||
/// \brief Validate that `inputs` has exactly `expected_num_inputs` entries,
/// reporting errors with `kind_name` in the message — TODO confirm the exact
/// checks performed against the implementation.
ARROW_EXPORT
Status ValidateExecNodeInputs(ExecPlan* plan, const std::vector<ExecNode*>& inputs,
                              int expected_num_inputs, const char* kind_name);

/// \brief Assemble a Table with the given schema from a sequence of
/// ExecBatches.
ARROW_EXPORT
Result<std::shared_ptr<Table>> TableFromExecBatches(
    const std::shared_ptr<Schema>& schema, const std::vector<ExecBatch>& exec_batches);
|
||||
|
||||
// Thread-safe counter that reports completion exactly once, whether
// completion comes from the count reaching a (possibly late-set) total or
// from cancellation. The single "true" is guaranteed by the
// compare-exchange in DoneOnce().
class ARROW_EXPORT AtomicCounter {
 public:
  AtomicCounter() = default;

  // Number of Increment() calls observed so far.
  int count() const { return count_.load(); }

  // The configured total, or empty if SetTotal() has not been called yet
  // (total_ uses -1 as the "unset" sentinel).
  std::optional<int> total() const {
    int total = total_.load();
    if (total == -1) return {};
    return total;
  }

  // return true if the counter is complete
  bool Increment() {
    // Incrementing past a reached total is a caller bug.
    DCHECK_NE(count_.load(), total_.load());
    int count = count_.fetch_add(1) + 1;
    if (count != total_.load()) return false;
    // Count reached total; only one caller (here or in SetTotal) wins.
    return DoneOnce();
  }

  // return true if the counter is complete
  bool SetTotal(int total) {
    total_.store(total);
    // The count may already have reached the total before it was known.
    if (count_.load() != total) return false;
    return DoneOnce();
  }

  // return true if the counter has not already been completed
  bool Cancel() { return DoneOnce(); }

  // return true if the counter has finished or been cancelled
  bool Completed() { return complete_.load(); }

 private:
  // ensure there is only one true return from Increment(), SetTotal(), or Cancel()
  bool DoneOnce() {
    bool expected = false;
    // Exactly one CAS can flip false -> true; all later calls return false.
    return complete_.compare_exchange_strong(expected, true);
  }

  std::atomic<int> count_{0}, total_{-1};
  std::atomic<bool> complete_{false};
};
|
||||
|
||||
// Maps each calling thread's std::thread::id to a small dense index
// (stored in id_to_index_ under mutex_), suitable for indexing per-thread
// state arrays of size Capacity().
class ARROW_EXPORT ThreadIndexer {
 public:
  // Return the dense index for the calling thread, assigning one on first use.
  size_t operator()();

  // Upper bound on the number of distinct indices that can be handed out.
  static size_t Capacity();

 private:
  // Validates a thread index — presumably against Capacity(); confirm in .cc.
  static size_t Check(size_t thread_index);

  util::Mutex mutex_;  // guards id_to_index_
  std::unordered_map<std::thread::id, size_t> id_to_index_;
};
|
||||
|
||||
// Helper class to calculate the modified number of rows to process using SIMD.
|
||||
//
|
||||
// Some array elements at the end will be skipped in order to avoid buffer
|
||||
// overrun, when doing memory loads and stores using larger word size than a
|
||||
// single array element.
|
||||
//
|
||||
class TailSkipForSIMD {
|
||||
public:
|
||||
static int64_t FixBitAccess(int num_bytes_accessed_together, int64_t num_rows,
|
||||
int bit_offset) {
|
||||
int64_t num_bytes = bit_util::BytesForBits(num_rows + bit_offset);
|
||||
int64_t num_bytes_safe =
|
||||
std::max(static_cast<int64_t>(0LL), num_bytes - num_bytes_accessed_together + 1);
|
||||
int64_t num_rows_safe =
|
||||
std::max(static_cast<int64_t>(0LL), 8 * num_bytes_safe - bit_offset);
|
||||
return std::min(num_rows_safe, num_rows);
|
||||
}
|
||||
static int64_t FixBinaryAccess(int num_bytes_accessed_together, int64_t num_rows,
|
||||
int64_t length) {
|
||||
int64_t num_rows_to_skip = bit_util::CeilDiv(length, num_bytes_accessed_together);
|
||||
int64_t num_rows_safe =
|
||||
std::max(static_cast<int64_t>(0LL), num_rows - num_rows_to_skip);
|
||||
return num_rows_safe;
|
||||
}
|
||||
static int64_t FixVarBinaryAccess(int num_bytes_accessed_together, int64_t num_rows,
|
||||
const uint32_t* offsets) {
|
||||
// Do not process rows that could read past the end of the buffer using N
|
||||
// byte loads/stores.
|
||||
//
|
||||
int64_t num_rows_safe = num_rows;
|
||||
while (num_rows_safe > 0 &&
|
||||
offsets[num_rows_safe] + num_bytes_accessed_together > offsets[num_rows]) {
|
||||
--num_rows_safe;
|
||||
}
|
||||
return num_rows_safe;
|
||||
}
|
||||
static int FixSelection(int64_t num_rows_safe, int num_selected,
|
||||
const uint16_t* selection) {
|
||||
int num_selected_safe = num_selected;
|
||||
while (num_selected_safe > 0 && selection[num_selected_safe - 1] >= num_rows_safe) {
|
||||
--num_selected_safe;
|
||||
}
|
||||
return num_selected_safe;
|
||||
}
|
||||
};
|
||||
|
||||
/// \brief A consumer that collects results into an in-memory table
|
||||
/// \brief A consumer that collects results into an in-memory table
struct ARROW_EXPORT TableSinkNodeConsumer : public SinkNodeConsumer {
 public:
  // `out` is a caller-owned destination that receives the assembled Table
  // (presumably when Finish() runs — confirm in the implementation);
  // `pool` is used for allocations. Both must outlive this consumer.
  TableSinkNodeConsumer(std::shared_ptr<Table>* out, MemoryPool* pool)
      : out_(out), pool_(pool) {}
  Status Init(const std::shared_ptr<Schema>& schema,
              BackpressureControl* backpressure_control, ExecPlan* plan) override;
  Status Consume(ExecBatch batch) override;
  Future<> Finish() override;

 private:
  std::shared_ptr<Table>* out_;  // non-owning pointer to the result slot
  MemoryPool* pool_;
  std::shared_ptr<Schema> schema_;  // captured in Init()
  std::vector<std::shared_ptr<RecordBatch>> batches_;  // accumulated output
  util::Mutex consume_mutex_;  // Consume() may be called concurrently
};
|
||||
|
||||
// Sink consumer that discards every batch it receives; useful when a plan
// is executed only for its side effects.
class ARROW_EXPORT NullSinkNodeConsumer : public SinkNodeConsumer {
 public:
  // Factory returning a shared, default-constructed instance.
  static std::shared_ptr<NullSinkNodeConsumer> Make() {
    return std::make_shared<NullSinkNodeConsumer>();
  }

  // All stages are no-ops that report success.
  Status Init(const std::shared_ptr<Schema>&, BackpressureControl*,
              ExecPlan* plan) override {
    return Status::OK();
  }
  Status Consume(ExecBatch exec_batch) override { return Status::OK(); }
  Future<> Finish() override { return Status::OK(); }
};
|
||||
|
||||
/// Modify an Expression with pre-order and post-order visitation.
|
||||
/// `pre` will be invoked on each Expression. `pre` will visit Calls before their
|
||||
/// arguments, `post_call` will visit Calls (and no other Expressions) after their
|
||||
/// arguments. Visitors should return the Identical expression to indicate no change; this
|
||||
/// will prevent unnecessary construction in the common case where a modification is not
|
||||
/// possible/necessary/...
|
||||
///
|
||||
/// If an argument was modified, `post_call` visits a reconstructed Call with the modified
|
||||
/// arguments but also receives a pointer to the unmodified Expression as a second
|
||||
/// argument. If no arguments were modified the unmodified Expression* will be nullptr.
|
||||
template <typename PreVisit, typename PostVisitCall>
Result<Expression> ModifyExpression(Expression expr, const PreVisit& pre,
                                    const PostVisitCall& post_call) {
  // Pre-order step: let `pre` rewrite this node before descending.
  ARROW_ASSIGN_OR_RAISE(expr, Result<Expression>(pre(std::move(expr))));

  // Only Call expressions have arguments to recurse into.
  auto call = expr.call();
  if (!call) return expr;

  // Copy-on-write: arguments are only copied once the first modification
  // is seen, so unmodified subtrees incur no construction cost.
  bool at_least_one_modified = false;
  std::vector<Expression> modified_arguments;

  for (size_t i = 0; i < call->arguments.size(); ++i) {
    // Recurse into each argument with the same visitors.
    ARROW_ASSIGN_OR_RAISE(auto modified_argument,
                          ModifyExpression(call->arguments[i], pre, post_call));

    // Identical => visitor signalled "no change"; skip the copy.
    if (Identical(modified_argument, call->arguments[i])) {
      continue;
    }

    if (!at_least_one_modified) {
      // First change: snapshot all arguments so earlier (unchanged) ones
      // are preserved in the rebuilt list.
      modified_arguments = call->arguments;
      at_least_one_modified = true;
    }

    modified_arguments[i] = std::move(modified_argument);
  }

  if (at_least_one_modified) {
    // reconstruct the call expression with the modified arguments
    auto modified_call = *call;
    modified_call.arguments = std::move(modified_arguments);
    // Second argument points at the unmodified expression, per contract.
    return post_call(Expression(std::move(modified_call)), &expr);
  }

  // Nothing changed below: post-visit with nullptr as documented.
  return post_call(std::move(expr), NULLPTR);
}
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,441 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// NOTE: API is EXPERIMENTAL and will change without going through a
|
||||
// deprecation cycle.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/kernel.h"
|
||||
#include "arrow/compute/type_fwd.h"
|
||||
#include "arrow/datum.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/status.h"
|
||||
#include "arrow/util/compare.h"
|
||||
#include "arrow/util/macros.h"
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
/// \defgroup compute-functions Abstract compute function API
|
||||
///
|
||||
/// @{
|
||||
|
||||
/// \brief Extension point for defining options outside libarrow (but
|
||||
/// still within this project).
|
||||
/// \brief Extension point for defining options outside libarrow (but
/// still within this project).
///
/// One instance per concrete FunctionOptions subclass supplies its naming,
/// printing, comparison, copying, and (de)serialization behavior.
class ARROW_EXPORT FunctionOptionsType {
 public:
  virtual ~FunctionOptionsType() = default;

  /// Unique name identifying the options type (used for lookup/deserialization).
  virtual const char* type_name() const = 0;
  /// Human-readable rendering of the given options instance.
  virtual std::string Stringify(const FunctionOptions&) const = 0;
  /// Equality of two options instances of this type.
  virtual bool Compare(const FunctionOptions&, const FunctionOptions&) const = 0;
  // Serialize/Deserialize are non-pure: a default (presumably
  // "unsupported"-reporting) implementation exists — confirm in the .cc.
  virtual Result<std::shared_ptr<Buffer>> Serialize(const FunctionOptions&) const;
  virtual Result<std::unique_ptr<FunctionOptions>> Deserialize(
      const Buffer& buffer) const;
  /// Deep-copy an options instance of this type.
  virtual std::unique_ptr<FunctionOptions> Copy(const FunctionOptions&) const = 0;
};
|
||||
|
||||
/// \brief Base class for specifying options configuring a function's behavior,
|
||||
/// such as error handling.
|
||||
/// \brief Base class for specifying options configuring a function's behavior,
/// such as error handling.
///
/// Equality, printing, copying and serialization are delegated to the
/// FunctionOptionsType instance supplied at construction.
class ARROW_EXPORT FunctionOptions : public util::EqualityComparable<FunctionOptions> {
 public:
  virtual ~FunctionOptions() = default;

  /// The type object describing this options class.
  const FunctionOptionsType* options_type() const { return options_type_; }
  /// Shorthand for options_type()->type_name().
  const char* type_name() const { return options_type()->type_name(); }

  bool Equals(const FunctionOptions& other) const;
  using util::EqualityComparable<FunctionOptions>::Equals;
  using util::EqualityComparable<FunctionOptions>::operator==;
  using util::EqualityComparable<FunctionOptions>::operator!=;
  std::string ToString() const;
  std::unique_ptr<FunctionOptions> Copy() const;
  /// \brief Serialize an options struct to a buffer.
  Result<std::shared_ptr<Buffer>> Serialize() const;
  /// \brief Deserialize an options struct from a buffer.
  /// Note: this will only look for `type_name` in the default FunctionRegistry;
  /// to use a custom FunctionRegistry, look up the FunctionOptionsType, then
  /// call FunctionOptionsType::Deserialize().
  static Result<std::unique_ptr<FunctionOptions>> Deserialize(
      const std::string& type_name, const Buffer& buffer);

 protected:
  /// Subclasses pass their (typically singleton) FunctionOptionsType.
  explicit FunctionOptions(const FunctionOptionsType* type) : options_type_(type) {}
  const FunctionOptionsType* options_type_;
};
|
||||
|
||||
ARROW_EXPORT void PrintTo(const FunctionOptions&, std::ostream*);
|
||||
|
||||
/// \brief Contains the number of required arguments for the function.
|
||||
///
|
||||
/// Naming conventions taken from https://en.wikipedia.org/wiki/Arity.
|
||||
/// \brief Contains the number of required arguments for the function.
///
/// Naming conventions taken from https://en.wikipedia.org/wiki/Arity.
struct ARROW_EXPORT Arity {
  /// \brief A function taking no arguments
  static Arity Nullary() { return Arity(0, false); }

  /// \brief A function taking 1 argument
  static Arity Unary() { return Arity(1, false); }

  /// \brief A function taking 2 arguments
  static Arity Binary() { return Arity(2, false); }

  /// \brief A function taking 3 arguments
  static Arity Ternary() { return Arity(3, false); }

  /// \brief A function taking a variable number of arguments
  ///
  /// \param[in] min_args the minimum number of arguments required when
  /// invoking the function
  static Arity VarArgs(int min_args = 0) { return Arity(min_args, true); }

  // NOTE: the 0-argument form (default constructor) is required for Cython
  // Prefer the named factories above in C++ code.
  explicit Arity(int num_args = 0, bool is_varargs = false)
      : num_args(num_args), is_varargs(is_varargs) {}

  /// The number of required arguments (or the minimum number for varargs
  /// functions).
  int num_args;

  /// If true, then the num_args is the minimum number of required arguments.
  bool is_varargs = false;
};
|
||||
|
||||
/// \brief Human-readable documentation attached to a compute Function.
struct ARROW_EXPORT FunctionDoc {
  /// \brief A one-line summary of the function, using a verb.
  ///
  /// For example, "Add two numeric arrays or scalars".
  std::string summary;

  /// \brief A detailed description of the function, meant to follow the summary.
  std::string description;

  /// \brief Symbolic names (identifiers) for the function arguments.
  ///
  /// Some bindings may use this to generate nicer function signatures.
  std::vector<std::string> arg_names;

  // TODO add argument descriptions?

  /// \brief Name of the options class, if any.
  std::string options_class;

  /// \brief Whether options are required for function execution
  ///
  /// If false, then either the function does not have an options class
  /// or there is a usable default options value.
  bool options_required;

  FunctionDoc() = default;

  // All string/vector parameters are taken by value and moved into place.
  FunctionDoc(std::string summary, std::string description,
              std::vector<std::string> arg_names, std::string options_class = "",
              bool options_required = false)
      : summary(std::move(summary)),
        description(std::move(description)),
        arg_names(std::move(arg_names)),
        options_class(std::move(options_class)),
        options_required(options_required) {}

  /// Shared empty-doc singleton for undocumented functions.
  static const FunctionDoc& Empty();
};
|
||||
|
||||
/// \brief An executor of a function with a preconfigured kernel
|
||||
/// \brief An executor of a function with a preconfigured kernel
class ARROW_EXPORT FunctionExecutor {
 public:
  virtual ~FunctionExecutor() = default;
  /// \brief Initialize or re-initialize the preconfigured kernel
  ///
  /// This method may be called zero or more times. Depending on how
  /// the FunctionExecutor was obtained, it may already have been initialized.
  /// Null arguments select the defaults (see Function::GetBestExecutor).
  virtual Status Init(const FunctionOptions* options = NULLPTR,
                      ExecContext* exec_ctx = NULLPTR) = 0;
  /// \brief Execute the preconfigured kernel with arguments that must fit it
  ///
  /// The method requires the arguments be castable to the preconfigured types.
  ///
  /// \param[in] args Arguments to execute the function on
  /// \param[in] length Length of arguments batch or -1 to default it. If the
  /// function has no parameters, this determines the batch length, defaulting
  /// to 0. Otherwise, if the function is scalar, this must equal the argument
  /// batch's inferred length or be -1 to default to it. This is ignored for
  /// vector functions.
  /// \return the function result as a Datum
  virtual Result<Datum> Execute(const std::vector<Datum>& args, int64_t length = -1) = 0;
};
|
||||
|
||||
/// \brief Base class for compute functions. Function implementations contain a
|
||||
/// collection of "kernels" which are implementations of the function for
|
||||
/// specific argument types. Selecting a viable kernel for executing a function
|
||||
/// is referred to as "dispatching".
|
||||
/// \brief Base class for compute functions. Function implementations contain a
/// collection of "kernels" which are implementations of the function for
/// specific argument types. Selecting a viable kernel for executing a function
/// is referred to as "dispatching".
class ARROW_EXPORT Function {
 public:
  /// \brief The kind of function, which indicates in what contexts it is
  /// valid for use.
  enum Kind {
    /// A function that performs scalar data operations on whole arrays of
    /// data. Can generally process Array or Scalar values. The size of the
    /// output will be the same as the size (or broadcasted size, in the case
    /// of mixing Array and Scalar inputs) of the input.
    SCALAR,

    /// A function with array input and output whose behavior depends on the
    /// values of the entire arrays passed, rather than the value of each scalar
    /// value.
    VECTOR,

    /// A function that computes scalar summary statistics from array input.
    SCALAR_AGGREGATE,

    /// A function that computes grouped summary statistics from array input
    /// and an array of group identifiers.
    HASH_AGGREGATE,

    /// A function that dispatches to other functions and does not contain its
    /// own kernels.
    META
  };

  virtual ~Function() = default;

  /// \brief The name of the kernel. The registry enforces uniqueness of names.
  const std::string& name() const { return name_; }

  /// \brief The kind of kernel, which indicates in what contexts it is valid
  /// for use.
  Function::Kind kind() const { return kind_; }

  /// \brief Contains the number of arguments the function requires, or if the
  /// function accepts variable numbers of arguments.
  const Arity& arity() const { return arity_; }

  /// \brief Return the function documentation
  const FunctionDoc& doc() const { return doc_; }

  /// \brief Returns the number of registered kernels for this function.
  virtual int num_kernels() const = 0;

  /// \brief Return a kernel that can execute the function given the exact
  /// argument types (without implicit type casts).
  ///
  /// NB: This function is overridden in CastFunction.
  virtual Result<const Kernel*> DispatchExact(const std::vector<TypeHolder>& types) const;

  /// \brief Return a best-match kernel that can execute the function given the argument
  /// types, after implicit casts are applied.
  ///
  /// \param[in,out] values Argument types. An element may be modified to
  /// indicate that the returned kernel only approximately matches the input
  /// value descriptors; callers are responsible for casting inputs to the type
  /// required by the kernel.
  virtual Result<const Kernel*> DispatchBest(std::vector<TypeHolder>* values) const;

  /// \brief Get a function executor with a best-matching kernel
  ///
  /// The returned executor will by default work with the default FunctionOptions
  /// and KernelContext. If you want to change that, call `FunctionExecutor::Init`.
  virtual Result<std::shared_ptr<FunctionExecutor>> GetBestExecutor(
      std::vector<TypeHolder> inputs) const;

  /// \brief Execute the function eagerly with the passed input arguments with
  /// kernel dispatch, batch iteration, and memory allocation details taken
  /// care of.
  ///
  /// If the `options` pointer is null, then `default_options()` will be used.
  ///
  /// This function can be overridden in subclasses.
  virtual Result<Datum> Execute(const std::vector<Datum>& args,
                                const FunctionOptions* options, ExecContext* ctx) const;

  /// \brief Overload of Execute taking a pre-assembled ExecBatch of arguments.
  virtual Result<Datum> Execute(const ExecBatch& batch, const FunctionOptions* options,
                                ExecContext* ctx) const;

  /// \brief Returns the default options for this function.
  ///
  /// Whatever option semantics a Function has, implementations must guarantee
  /// that default_options() is valid to pass to Execute as options.
  const FunctionOptions* default_options() const { return default_options_; }

  /// \brief Validate the function's internal consistency — presumably its
  /// registered kernels against its arity; confirm in the implementation.
  virtual Status Validate() const;

 protected:
  Function(std::string name, Function::Kind kind, const Arity& arity, FunctionDoc doc,
           const FunctionOptions* default_options)
      : name_(std::move(name)),
        kind_(kind),
        arity_(arity),
        doc_(std::move(doc)),
        default_options_(default_options) {}

  // Check that `num_args` is compatible with arity_ (exact or >= min for varargs).
  Status CheckArity(size_t num_args) const;

  std::string name_;
  Function::Kind kind_;
  Arity arity_;
  const FunctionDoc doc_;
  const FunctionOptions* default_options_ = NULLPTR;
};
|
||||
|
||||
namespace detail {
|
||||
|
||||
template <typename KernelType>
|
||||
class FunctionImpl : public Function {
|
||||
public:
|
||||
/// \brief Return pointers to current-available kernels for inspection
|
||||
std::vector<const KernelType*> kernels() const {
|
||||
std::vector<const KernelType*> result;
|
||||
for (const auto& kernel : kernels_) {
|
||||
result.push_back(&kernel);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
int num_kernels() const override { return static_cast<int>(kernels_.size()); }
|
||||
|
||||
protected:
|
||||
FunctionImpl(std::string name, Function::Kind kind, const Arity& arity, FunctionDoc doc,
|
||||
const FunctionOptions* default_options)
|
||||
: Function(std::move(name), kind, arity, std::move(doc), default_options) {}
|
||||
|
||||
std::vector<KernelType> kernels_;
|
||||
};
|
||||
|
||||
/// \brief Look up a kernel in a function. If no Kernel is found, nullptr is returned.
ARROW_EXPORT
const Kernel* DispatchExactImpl(const Function* func, const std::vector<TypeHolder>&);

/// \brief Return a Status describing the kernel-not-found error for the
/// given function and argument types.
ARROW_EXPORT
Status NoMatchingKernel(const Function* func, const std::vector<TypeHolder>&);
|
||||
|
||||
} // namespace detail
|
||||
|
||||
/// \brief A function that executes elementwise operations on arrays or
|
||||
/// scalars, and therefore whose results generally do not depend on the order
|
||||
/// of the values in the arguments. Accepts and returns arrays that are all of
|
||||
/// the same size. These functions roughly correspond to the functions used in
|
||||
/// SQL expressions.
|
||||
/// \brief A function that executes elementwise operations on arrays or
/// scalars, and therefore whose results generally do not depend on the order
/// of the values in the arguments. Accepts and returns arrays that are all of
/// the same size. These functions roughly correspond to the functions used in
/// SQL expressions.
class ARROW_EXPORT ScalarFunction : public detail::FunctionImpl<ScalarKernel> {
 public:
  using KernelType = ScalarKernel;

  // `doc` is moved; `default_options`, if given, must outlive the function.
  ScalarFunction(std::string name, const Arity& arity, FunctionDoc doc,
                 const FunctionOptions* default_options = NULLPTR)
      : detail::FunctionImpl<ScalarKernel>(std::move(name), Function::SCALAR, arity,
                                           std::move(doc), default_options) {}

  /// \brief Add a kernel with given input/output types, no required state
  /// initialization, preallocation for fixed-width types, and default null
  /// handling (intersect validity bitmaps of inputs).
  Status AddKernel(std::vector<InputType> in_types, OutputType out_type,
                   ArrayKernelExec exec, KernelInit init = NULLPTR);

  /// \brief Add a kernel (function implementation). Returns error if the
  /// kernel's signature does not match the function's arity.
  Status AddKernel(ScalarKernel kernel);
};
|
||||
|
||||
/// \brief A function that executes general array operations that may yield
|
||||
/// outputs of different sizes or have results that depend on the whole array
|
||||
/// contents. These functions roughly correspond to the functions found in
|
||||
/// non-SQL array languages like APL and its derivatives.
|
||||
/// \brief A function that executes general array operations that may yield
/// outputs of different sizes or have results that depend on the whole array
/// contents. These functions roughly correspond to the functions found in
/// non-SQL array languages like APL and its derivatives.
class ARROW_EXPORT VectorFunction : public detail::FunctionImpl<VectorKernel> {
 public:
  using KernelType = VectorKernel;

  // `doc` is moved; `default_options`, if given, must outlive the function.
  VectorFunction(std::string name, const Arity& arity, FunctionDoc doc,
                 const FunctionOptions* default_options = NULLPTR)
      : detail::FunctionImpl<VectorKernel>(std::move(name), Function::VECTOR, arity,
                                           std::move(doc), default_options) {}

  /// \brief Add a simple kernel with given input/output types, no required
  /// state initialization, no data preallocation, and no preallocation of the
  /// validity bitmap.
  Status AddKernel(std::vector<InputType> in_types, OutputType out_type,
                   ArrayKernelExec exec, KernelInit init = NULLPTR);

  /// \brief Add a kernel (function implementation). Returns error if the
  /// kernel's signature does not match the function's arity.
  Status AddKernel(VectorKernel kernel);
};
|
||||
|
||||
/// \brief A function computing a scalar summary statistic from array input,
/// implemented with ScalarAggregateKernels.
class ARROW_EXPORT ScalarAggregateFunction
    : public detail::FunctionImpl<ScalarAggregateKernel> {
 public:
  using KernelType = ScalarAggregateKernel;

  // `doc` is moved; `default_options`, if given, must outlive the function.
  ScalarAggregateFunction(std::string name, const Arity& arity, FunctionDoc doc,
                          const FunctionOptions* default_options = NULLPTR)
      : detail::FunctionImpl<ScalarAggregateKernel>(std::move(name),
                                                    Function::SCALAR_AGGREGATE, arity,
                                                    std::move(doc), default_options) {}

  /// \brief Add a kernel (function implementation). Returns error if the
  /// kernel's signature does not match the function's arity.
  Status AddKernel(ScalarAggregateKernel kernel);
};
|
||||
|
||||
/// \brief A function computing grouped summary statistics (aggregation over
/// group ids), implemented with HashAggregateKernels.
class ARROW_EXPORT HashAggregateFunction
    : public detail::FunctionImpl<HashAggregateKernel> {
 public:
  using KernelType = HashAggregateKernel;

  // `doc` is moved; `default_options`, if given, must outlive the function.
  HashAggregateFunction(std::string name, const Arity& arity, FunctionDoc doc,
                        const FunctionOptions* default_options = NULLPTR)
      : detail::FunctionImpl<HashAggregateKernel>(std::move(name),
                                                  Function::HASH_AGGREGATE, arity,
                                                  std::move(doc), default_options) {}

  /// \brief Add a kernel (function implementation). Returns error if the
  /// kernel's signature does not match the function's arity.
  Status AddKernel(HashAggregateKernel kernel);
};
|
||||
|
||||
/// \brief A function that dispatches to other functions. Must implement
|
||||
/// MetaFunction::ExecuteImpl.
|
||||
///
|
||||
/// For Array, ChunkedArray, and Scalar Datum kinds, may rely on the execution
|
||||
/// of concrete Function types, but must handle other Datum kinds on its own.
|
||||
/// \brief A function that dispatches to other functions. Must implement
/// MetaFunction::ExecuteImpl.
///
/// For Array, ChunkedArray, and Scalar Datum kinds, may rely on the execution
/// of concrete Function types, but must handle other Datum kinds on its own.
class ARROW_EXPORT MetaFunction : public Function {
 public:
  // Meta functions own no kernels; they delegate to other functions.
  int num_kernels() const override { return 0; }

  Result<Datum> Execute(const std::vector<Datum>& args, const FunctionOptions* options,
                        ExecContext* ctx) const override;

  Result<Datum> Execute(const ExecBatch& batch, const FunctionOptions* options,
                        ExecContext* ctx) const override;

 protected:
  /// Subclass hook that performs the actual delegation/execution.
  virtual Result<Datum> ExecuteImpl(const std::vector<Datum>& args,
                                    const FunctionOptions* options,
                                    ExecContext* ctx) const = 0;

  MetaFunction(std::string name, const Arity& arity, FunctionDoc doc,
               const FunctionOptions* default_options = NULLPTR)
      : Function(std::move(name), Function::META, arity, std::move(doc),
                 default_options) {}
};
|
||||
|
||||
/// @}
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,705 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// NOTE: API is EXPERIMENTAL and will change without going through a
|
||||
// deprecation cycle
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/buffer.h"
|
||||
#include "arrow/compute/exec.h"
|
||||
#include "arrow/datum.h"
|
||||
#include "arrow/memory_pool.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/status.h"
|
||||
#include "arrow/type.h"
|
||||
#include "arrow/util/macros.h"
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
class FunctionOptions;
|
||||
|
||||
/// \brief Base class for opaque kernel-specific state. For example, if there
/// is some kind of initialization required.
///
/// Instances are owned externally: KernelContext stores only a raw pointer to
/// one (see KernelContext::SetState). The virtual destructor permits safe
/// deletion of subclasses through a KernelState pointer.
struct ARROW_EXPORT KernelState {
  virtual ~KernelState() = default;
};
|
||||
|
||||
/// \brief Context/state for the execution of a particular kernel.
///
/// Wraps an ExecContext (for memory pool access) plus optional per-invocation
/// kernel state and a backreference to the kernel being run. Does not own the
/// ExecContext, KernelState, or Kernel it points to.
class ARROW_EXPORT KernelContext {
 public:
  // Can pass optional backreference; not used consistently for the
  // moment but will be made so in the future
  explicit KernelContext(ExecContext* exec_ctx, const Kernel* kernel = NULLPTR)
      : exec_ctx_(exec_ctx), kernel_(kernel) {}

  /// \brief Allocate buffer from the context's memory pool. The contents are
  /// not initialized.
  Result<std::shared_ptr<ResizableBuffer>> Allocate(int64_t nbytes);

  /// \brief Allocate buffer for bitmap from the context's memory pool. Like
  /// Allocate, the contents of the buffer are not initialized but the last
  /// byte is preemptively zeroed to help avoid ASAN or valgrind issues.
  Result<std::shared_ptr<ResizableBuffer>> AllocateBitmap(int64_t num_bits);

  /// \brief Assign the active KernelState to be utilized for each stage of
  /// kernel execution. Ownership and memory lifetime of the KernelState must
  /// be minded separately (only the raw pointer is stored here).
  void SetState(KernelState* state) { state_ = state; }

  // Set kernel that is being invoked since some kernel
  // implementations will examine the kernel state.
  void SetKernel(const Kernel* kernel) { kernel_ = kernel; }

  /// \brief The opaque state set via SetState, or NULLPTR if none was set.
  KernelState* state() { return state_; }

  /// \brief Configuration related to function execution that is to be shared
  /// across multiple kernels.
  ExecContext* exec_context() { return exec_ctx_; }

  /// \brief The memory pool to use for allocations. For now, it uses the
  /// MemoryPool contained in the ExecContext used to create the KernelContext.
  MemoryPool* memory_pool() { return exec_ctx_->memory_pool(); }

  /// \brief The kernel currently being invoked, or NULLPTR if not set.
  const Kernel* kernel() const { return kernel_; }

 private:
  ExecContext* exec_ctx_;
  KernelState* state_ = NULLPTR;
  const Kernel* kernel_ = NULLPTR;
};
|
||||
|
||||
/// \brief A type-checking interface to permit customizable validation rules
/// for use with InputType and KernelSignature. This is for scenarios where the
/// acceptance is not an exact type instance, such as a TIMESTAMP type for a
/// specific TimeUnit, but permitting any time zone.
struct ARROW_EXPORT TypeMatcher {
  virtual ~TypeMatcher() = default;

  /// \brief Return true if this matcher accepts the data type.
  virtual bool Matches(const DataType& type) const = 0;

  /// \brief A human-interpretable string representation of what the type
  /// matcher checks for, usable when printing KernelSignature or formatting
  /// error messages.
  virtual std::string ToString() const = 0;

  /// \brief Return true if this TypeMatcher contains the same matching rule as
  /// the other. Currently depends on RTTI.
  virtual bool Equals(const TypeMatcher& other) const = 0;
};
|
||||
|
||||
// Factory functions returning stock TypeMatcher implementations, for use when
// constructing InputType / KernelSignature instances.
namespace match {

/// \brief Match any DataType instance having the same DataType::id.
ARROW_EXPORT std::shared_ptr<TypeMatcher> SameTypeId(Type::type type_id);

/// \brief Match any TimestampType instance having the same unit, but the time
/// zones can be different.
ARROW_EXPORT std::shared_ptr<TypeMatcher> TimestampTypeUnit(TimeUnit::type unit);
ARROW_EXPORT std::shared_ptr<TypeMatcher> Time32TypeUnit(TimeUnit::type unit);
ARROW_EXPORT std::shared_ptr<TypeMatcher> Time64TypeUnit(TimeUnit::type unit);
ARROW_EXPORT std::shared_ptr<TypeMatcher> DurationTypeUnit(TimeUnit::type unit);

/// \brief Match any integer type
ARROW_EXPORT std::shared_ptr<TypeMatcher> Integer();

/// \brief Match types using 32-bit varbinary representation
ARROW_EXPORT std::shared_ptr<TypeMatcher> BinaryLike();

/// \brief Match types using 64-bit varbinary representation
ARROW_EXPORT std::shared_ptr<TypeMatcher> LargeBinaryLike();

/// \brief Match any fixed binary type
ARROW_EXPORT std::shared_ptr<TypeMatcher> FixedSizeBinaryLike();

/// \brief Match any primitive type (boolean or any type representable as a C
/// Type)
ARROW_EXPORT std::shared_ptr<TypeMatcher> Primitive();

}  // namespace match
|
||||
|
||||
/// \brief An object used for type-checking arguments to be passed to a kernel
|
||||
/// and stored in a KernelSignature. The type-checking rule can be supplied
|
||||
/// either with an exact DataType instance or a custom TypeMatcher.
|
||||
class ARROW_EXPORT InputType {
|
||||
public:
|
||||
/// \brief The kind of type-checking rule that the InputType contains.
|
||||
enum Kind {
|
||||
/// \brief Accept any value type.
|
||||
ANY_TYPE,
|
||||
|
||||
/// \brief A fixed arrow::DataType and will only exact match having this
|
||||
/// exact type (e.g. same TimestampType unit, same decimal scale and
|
||||
/// precision, or same nested child types).
|
||||
EXACT_TYPE,
|
||||
|
||||
/// \brief Uses a TypeMatcher implementation to check the type.
|
||||
USE_TYPE_MATCHER
|
||||
};
|
||||
|
||||
/// \brief Accept any value type
|
||||
InputType() : kind_(ANY_TYPE) {}
|
||||
|
||||
/// \brief Accept an exact value type.
|
||||
InputType(std::shared_ptr<DataType> type) // NOLINT implicit construction
|
||||
: kind_(EXACT_TYPE), type_(std::move(type)) {}
|
||||
|
||||
/// \brief Use the passed TypeMatcher to type check.
|
||||
InputType(std::shared_ptr<TypeMatcher> type_matcher) // NOLINT implicit construction
|
||||
: kind_(USE_TYPE_MATCHER), type_matcher_(std::move(type_matcher)) {}
|
||||
|
||||
/// \brief Match any type with the given Type::type. Uses a TypeMatcher for
|
||||
/// its implementation.
|
||||
InputType(Type::type type_id) // NOLINT implicit construction
|
||||
: InputType(match::SameTypeId(type_id)) {}
|
||||
|
||||
InputType(const InputType& other) { CopyInto(other); }
|
||||
|
||||
void operator=(const InputType& other) { CopyInto(other); }
|
||||
|
||||
InputType(InputType&& other) { MoveInto(std::forward<InputType>(other)); }
|
||||
|
||||
void operator=(InputType&& other) { MoveInto(std::forward<InputType>(other)); }
|
||||
|
||||
// \brief Match any input (array, scalar of any type)
|
||||
static InputType Any() { return InputType(); }
|
||||
|
||||
/// \brief Return true if this input type matches the same type cases as the
|
||||
/// other.
|
||||
bool Equals(const InputType& other) const;
|
||||
|
||||
bool operator==(const InputType& other) const { return this->Equals(other); }
|
||||
|
||||
bool operator!=(const InputType& other) const { return !(*this == other); }
|
||||
|
||||
/// \brief Return hash code.
|
||||
size_t Hash() const;
|
||||
|
||||
/// \brief Render a human-readable string representation.
|
||||
std::string ToString() const;
|
||||
|
||||
/// \brief Return true if the Datum matches this argument kind in
|
||||
/// type (and only allows scalar or array-like Datums).
|
||||
bool Matches(const Datum& value) const;
|
||||
|
||||
/// \brief Return true if the type matches this InputType
|
||||
bool Matches(const DataType& type) const;
|
||||
|
||||
/// \brief The type matching rule that this InputType uses.
|
||||
Kind kind() const { return kind_; }
|
||||
|
||||
/// \brief For InputType::EXACT_TYPE kind, the exact type that this InputType
|
||||
/// must match. Otherwise this function should not be used and will assert in
|
||||
/// debug builds.
|
||||
const std::shared_ptr<DataType>& type() const;
|
||||
|
||||
/// \brief For InputType::USE_TYPE_MATCHER, the TypeMatcher to be used for
|
||||
/// checking the type of a value. Otherwise this function should not be used
|
||||
/// and will assert in debug builds.
|
||||
const TypeMatcher& type_matcher() const;
|
||||
|
||||
private:
|
||||
void CopyInto(const InputType& other) {
|
||||
this->kind_ = other.kind_;
|
||||
this->type_ = other.type_;
|
||||
this->type_matcher_ = other.type_matcher_;
|
||||
}
|
||||
|
||||
void MoveInto(InputType&& other) {
|
||||
this->kind_ = other.kind_;
|
||||
this->type_ = std::move(other.type_);
|
||||
this->type_matcher_ = std::move(other.type_matcher_);
|
||||
}
|
||||
|
||||
Kind kind_;
|
||||
|
||||
// For EXACT_TYPE Kind
|
||||
std::shared_ptr<DataType> type_;
|
||||
|
||||
// For USE_TYPE_MATCHER Kind
|
||||
std::shared_ptr<TypeMatcher> type_matcher_;
|
||||
};
|
||||
|
||||
/// \brief Container to capture both exact and input-dependent output types.
|
||||
class ARROW_EXPORT OutputType {
|
||||
public:
|
||||
/// \brief An enum indicating whether the value type is an invariant fixed
|
||||
/// value or one that's computed by a kernel-defined resolver function.
|
||||
enum ResolveKind { FIXED, COMPUTED };
|
||||
|
||||
/// Type resolution function. Given input types, return output type. This
|
||||
/// function MAY may use the kernel state to decide the output type based on
|
||||
/// the FunctionOptions.
|
||||
///
|
||||
/// This function SHOULD _not_ be used to check for arity, that is to be
|
||||
/// performed one or more layers above.
|
||||
using Resolver = Result<TypeHolder> (*)(KernelContext*, const std::vector<TypeHolder>&);
|
||||
|
||||
/// \brief Output an exact type
|
||||
OutputType(std::shared_ptr<DataType> type) // NOLINT implicit construction
|
||||
: kind_(FIXED), type_(std::move(type)) {}
|
||||
|
||||
/// \brief Output a computed type depending on actual input types
|
||||
OutputType(Resolver resolver) // NOLINT implicit construction
|
||||
: kind_(COMPUTED), resolver_(std::move(resolver)) {}
|
||||
|
||||
OutputType(const OutputType& other) {
|
||||
this->kind_ = other.kind_;
|
||||
this->type_ = other.type_;
|
||||
this->resolver_ = other.resolver_;
|
||||
}
|
||||
|
||||
OutputType(OutputType&& other) {
|
||||
this->kind_ = other.kind_;
|
||||
this->type_ = std::move(other.type_);
|
||||
this->resolver_ = other.resolver_;
|
||||
}
|
||||
|
||||
OutputType& operator=(const OutputType&) = default;
|
||||
OutputType& operator=(OutputType&&) = default;
|
||||
|
||||
/// \brief Return the type of the expected output value of the kernel given
|
||||
/// the input argument types. The resolver may make use of state information
|
||||
/// kept in the KernelContext.
|
||||
Result<TypeHolder> Resolve(KernelContext* ctx,
|
||||
const std::vector<TypeHolder>& args) const;
|
||||
|
||||
/// \brief The exact output value type for the FIXED kind.
|
||||
const std::shared_ptr<DataType>& type() const;
|
||||
|
||||
/// \brief For use with COMPUTED resolution strategy. It may be more
|
||||
/// convenient to invoke this with OutputType::Resolve returned from this
|
||||
/// method.
|
||||
const Resolver& resolver() const;
|
||||
|
||||
/// \brief Render a human-readable string representation.
|
||||
std::string ToString() const;
|
||||
|
||||
/// \brief Return the kind of type resolution of this output type, whether
|
||||
/// fixed/invariant or computed by a resolver.
|
||||
ResolveKind kind() const { return kind_; }
|
||||
|
||||
private:
|
||||
ResolveKind kind_;
|
||||
|
||||
// For FIXED resolution
|
||||
std::shared_ptr<DataType> type_;
|
||||
|
||||
// For COMPUTED resolution
|
||||
Resolver resolver_ = NULLPTR;
|
||||
};
|
||||
|
||||
/// \brief Holds the input types and output type of the kernel.
|
||||
///
|
||||
/// VarArgs functions with minimum N arguments should pass up to N input types to be
|
||||
/// used to validate the input types of a function invocation. The first N-1 types
|
||||
/// will be matched against the first N-1 arguments, and the last type will be
|
||||
/// matched against the remaining arguments.
|
||||
class ARROW_EXPORT KernelSignature {
|
||||
public:
|
||||
KernelSignature(std::vector<InputType> in_types, OutputType out_type,
|
||||
bool is_varargs = false);
|
||||
|
||||
/// \brief Convenience ctor since make_shared can be awkward
|
||||
static std::shared_ptr<KernelSignature> Make(std::vector<InputType> in_types,
|
||||
OutputType out_type,
|
||||
bool is_varargs = false);
|
||||
|
||||
/// \brief Return true if the signature if compatible with the list of input
|
||||
/// value descriptors.
|
||||
bool MatchesInputs(const std::vector<TypeHolder>& types) const;
|
||||
|
||||
/// \brief Returns true if the input types of each signature are
|
||||
/// equal. Well-formed functions should have a deterministic output type
|
||||
/// given input types, but currently it is the responsibility of the
|
||||
/// developer to ensure this.
|
||||
bool Equals(const KernelSignature& other) const;
|
||||
|
||||
bool operator==(const KernelSignature& other) const { return this->Equals(other); }
|
||||
|
||||
bool operator!=(const KernelSignature& other) const { return !(*this == other); }
|
||||
|
||||
/// \brief Compute a hash code for the signature
|
||||
size_t Hash() const;
|
||||
|
||||
/// \brief The input types for the kernel. For VarArgs functions, this should
|
||||
/// generally contain a single validator to use for validating all of the
|
||||
/// function arguments.
|
||||
const std::vector<InputType>& in_types() const { return in_types_; }
|
||||
|
||||
/// \brief The output type for the kernel. Use Resolve to return the
|
||||
/// exact output given input argument types, since many kernels'
|
||||
/// output types depend on their input types (or their type
|
||||
/// metadata).
|
||||
const OutputType& out_type() const { return out_type_; }
|
||||
|
||||
/// \brief Render a human-readable string representation
|
||||
std::string ToString() const;
|
||||
|
||||
bool is_varargs() const { return is_varargs_; }
|
||||
|
||||
private:
|
||||
std::vector<InputType> in_types_;
|
||||
OutputType out_type_;
|
||||
bool is_varargs_;
|
||||
|
||||
// For caching the hash code after it's computed the first time
|
||||
mutable uint64_t hash_code_;
|
||||
};
|
||||
|
||||
/// \brief A function may contain multiple variants of a kernel for a given
/// type combination for different SIMD levels. Based on the active system's
/// CPU info or the user's preferences, we can elect to use one over the other.
struct SimdLevel {
  // NOTE(review): MAX appears to be a count/sentinel enumerator rather than a
  // real SIMD level — confirm before treating it as one.
  enum type { NONE = 0, SSE4_2, AVX, AVX2, AVX512, NEON, MAX };
};
|
||||
|
||||
/// \brief The strategy to use for propagating or otherwise populating the
/// validity bitmap of a kernel output.
struct NullHandling {
  enum type {
    /// Compute the output validity bitmap by intersecting the validity bitmaps
    /// of the arguments using bitwise-and operations. This means that values
    /// in the output are valid/non-null only if the corresponding values in
    /// all input arguments were valid/non-null. Kernels generally need not
    /// touch the bitmap thereafter, but a kernel's exec function is permitted
    /// to alter the bitmap after the null intersection is computed if it needs
    /// to.
    INTERSECTION,

    /// Kernel expects a pre-allocated buffer to write the result bitmap
    /// into. The preallocated memory is not zeroed (except for the last byte),
    /// so the kernel should ensure to completely populate the bitmap.
    COMPUTED_PREALLOCATE,

    /// Kernel allocates and sets the validity bitmap of the output.
    COMPUTED_NO_PREALLOCATE,

    /// Kernel output is never null and a validity bitmap does not need to be
    /// allocated.
    OUTPUT_NOT_NULL
  };
};
|
||||
|
||||
/// \brief The preference for memory preallocation of fixed-width type outputs
/// in kernel execution.
struct MemAllocation {
  enum type {
    // For data types that support pre-allocation (i.e. fixed-width), the
    // kernel expects to be provided a pre-allocated data buffer to write
    // into. Non-fixed-width types must always allocate their own data
    // buffers. The allocation is made for the same length as the execution
    // batch, so vector kernels yielding differently sized output should not
    // use this.
    //
    // It is valid for the data to not be preallocated but the validity bitmap
    // is (or is computed using the intersection/bitwise-and method).
    //
    // For variable-size output types like BinaryType or StringType, or for
    // nested types, this option has no effect.
    PREALLOCATE,

    // The kernel is responsible for allocating its own data buffer for
    // fixed-width type outputs.
    NO_PREALLOCATE
  };
};
|
||||
|
||||
struct Kernel;
|
||||
|
||||
/// \brief Arguments to pass to a KernelInit function. A struct is used to help
/// avoid API breakage should the arguments passed need to be expanded.
struct KernelInitArgs {
  /// \brief A pointer to the kernel being initialized. The init function may
  /// depend on the kernel's KernelSignature or other data contained there.
  const Kernel* kernel;

  /// \brief The types of the input arguments that the kernel is
  /// about to be executed against.
  ///
  /// NOTE(review): this is a reference member, so the referenced vector must
  /// outlive this KernelInitArgs instance.
  const std::vector<TypeHolder>& inputs;

  /// \brief Opaque options specific to this kernel. May be nullptr for functions
  /// that do not require options.
  const FunctionOptions* options;
};
|
||||
|
||||
/// \brief Common initializer function for all kernel types.
///
/// Returns a newly-created KernelState (or an error) given the kernel context
/// and the KernelInitArgs describing the invocation.
using KernelInit = std::function<Result<std::unique_ptr<KernelState>>(
    KernelContext*, const KernelInitArgs&)>;

/// \brief Base type for kernels. Contains the function signature and
/// optionally the state initialization function, along with some common
/// attributes
struct Kernel {
  Kernel() = default;

  Kernel(std::shared_ptr<KernelSignature> sig, KernelInit init)
      : signature(std::move(sig)), init(std::move(init)) {}

  // Convenience constructor that builds the KernelSignature from the input
  // and output types.
  Kernel(std::vector<InputType> in_types, OutputType out_type, KernelInit init)
      : Kernel(KernelSignature::Make(std::move(in_types), std::move(out_type)),
               std::move(init)) {}

  /// \brief The "signature" of the kernel containing the InputType input
  /// argument validators and OutputType output type resolver.
  std::shared_ptr<KernelSignature> signature;

  /// \brief Create a new KernelState for invocations of this kernel, e.g. to
  /// set up any options or state relevant for execution.
  KernelInit init;

  /// \brief Create a vector of new KernelState for invocations of this kernel.
  static Status InitAll(KernelContext*, const KernelInitArgs&,
                        std::vector<std::unique_ptr<KernelState>>*);

  /// \brief Indicates whether execution can benefit from parallelization
  /// (splitting large chunks into smaller chunks and using multiple
  /// threads). Some kernels may not support parallel execution at
  /// all. Synchronization and concurrency-related issues are currently the
  /// responsibility of the Kernel's implementation.
  bool parallelizable = true;

  /// \brief Indicates the level of SIMD instruction support in the host CPU is
  /// required to use the function. The intention is for functions to be able to
  /// contain multiple kernels with the same signature but different levels of SIMD,
  /// so that the most optimized kernel supported on a host's processor can be chosen.
  SimdLevel::type simd_level = SimdLevel::NONE;

  // Additional kernel-specific data
  std::shared_ptr<KernelState> data;
};
|
||||
|
||||
/// \brief The scalar kernel execution API that must be implemented for SCALAR
|
||||
/// kernel types. This includes both stateless and stateful kernels. Kernels
|
||||
/// depending on some execution state access that state via subclasses of
|
||||
/// KernelState set on the KernelContext object. Implementations should
|
||||
/// endeavor to write into pre-allocated memory if they are able, though for
|
||||
/// some kernels (e.g. in cases when a builder like StringBuilder) must be
|
||||
/// employed this may not be possible.
|
||||
using ArrayKernelExec = Status (*)(KernelContext*, const ExecSpan&, ExecResult*);
|
||||
|
||||
/// \brief Kernel data structure for implementations of ScalarFunction. In
|
||||
/// addition to the members found in Kernel, contains the null handling
|
||||
/// and memory pre-allocation preferences.
|
||||
struct ScalarKernel : public Kernel {
|
||||
ScalarKernel() = default;
|
||||
|
||||
ScalarKernel(std::shared_ptr<KernelSignature> sig, ArrayKernelExec exec,
|
||||
KernelInit init = NULLPTR)
|
||||
: Kernel(std::move(sig), init), exec(exec) {}
|
||||
|
||||
ScalarKernel(std::vector<InputType> in_types, OutputType out_type, ArrayKernelExec exec,
|
||||
KernelInit init = NULLPTR)
|
||||
: Kernel(std::move(in_types), std::move(out_type), std::move(init)), exec(exec) {}
|
||||
|
||||
/// \brief Perform a single invocation of this kernel. Depending on the
|
||||
/// implementation, it may only write into preallocated memory, while in some
|
||||
/// cases it will allocate its own memory. Any required state is managed
|
||||
/// through the KernelContext.
|
||||
ArrayKernelExec exec;
|
||||
|
||||
/// \brief Writing execution results into larger contiguous allocations
|
||||
/// requires that the kernel be able to write into sliced output ArrayData*,
|
||||
/// including sliced output validity bitmaps. Some kernel implementations may
|
||||
/// not be able to do this, so setting this to false disables this
|
||||
/// functionality.
|
||||
bool can_write_into_slices = true;
|
||||
|
||||
// For scalar functions preallocated data and intersecting arg validity
|
||||
// bitmaps is a reasonable default
|
||||
NullHandling::type null_handling = NullHandling::INTERSECTION;
|
||||
MemAllocation::type mem_allocation = MemAllocation::PREALLOCATE;
|
||||
};
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
// VectorKernel (for VectorFunction)
|
||||
|
||||
/// \brief Kernel data structure for implementations of VectorFunction. It
/// contains an optional finalizer function, the null handling and memory
/// pre-allocation preferences (which have different defaults from
/// ScalarKernel), and some other execution-related options.
struct VectorKernel : public Kernel {
  /// \brief See VectorKernel::finalize member for usage
  using FinalizeFunc = std::function<Status(KernelContext*, std::vector<Datum>*)>;

  /// \brief Function for executing a stateful VectorKernel against a
  /// ChunkedArray input. Does not need to be defined for all VectorKernels
  using ChunkedExec = Status (*)(KernelContext*, const ExecBatch&, Datum* out);

  VectorKernel() = default;

  VectorKernel(std::vector<InputType> in_types, OutputType out_type, ArrayKernelExec exec,
               KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR)
      : Kernel(std::move(in_types), std::move(out_type), std::move(init)),
        exec(exec),
        finalize(std::move(finalize)) {}

  VectorKernel(std::shared_ptr<KernelSignature> sig, ArrayKernelExec exec,
               KernelInit init = NULLPTR, FinalizeFunc finalize = NULLPTR)
      : Kernel(std::move(sig), std::move(init)),
        exec(exec),
        finalize(std::move(finalize)) {}

  /// \brief Perform a single invocation of this kernel. Any required state is
  /// managed through the KernelContext.
  ArrayKernelExec exec;

  /// \brief Execute the kernel on a ChunkedArray. Does not need to be defined
  ChunkedExec exec_chunked = NULLPTR;

  /// \brief For VectorKernel, convert intermediate results into finalized
  /// results. Mutates input argument. Some kernels may accumulate state
  /// (example: hashing-related functions) through processing chunked inputs, and
  /// then need to attach some accumulated state to each of the outputs of
  /// processing each chunk of data.
  FinalizeFunc finalize;

  /// Since vector kernels generally are implemented rather differently from
  /// scalar/elementwise kernels (and they may not even yield arrays of the same
  /// size), so we make the developer opt-in to any memory preallocation rather
  /// than having to turn it off.
  NullHandling::type null_handling = NullHandling::COMPUTED_NO_PREALLOCATE;
  MemAllocation::type mem_allocation = MemAllocation::NO_PREALLOCATE;

  /// \brief Writing execution results into larger contiguous allocations
  /// requires that the kernel be able to write into sliced output ArrayData*,
  /// including sliced output validity bitmaps. Some kernel implementations may
  /// not be able to do this, so setting this to false disables this
  /// functionality.
  bool can_write_into_slices = true;

  /// Some vector kernels can do chunkwise execution using ExecSpanIterator,
  /// in some cases accumulating some state. Other kernels (like Take) need to
  /// be passed whole arrays and don't work on ChunkedArray inputs
  bool can_execute_chunkwise = true;

  /// Some kernels (like unique and value_counts) yield non-chunked output from
  /// chunked-array inputs. This option controls how the results are boxed when
  /// returned from ExecVectorFunction
  ///
  /// true -> ChunkedArray
  /// false -> Array
  bool output_chunked = true;
};
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
// ScalarAggregateKernel (for ScalarAggregateFunction)
|
||||
|
||||
// Function-pointer types implementing the consume/merge/finalize stages of a
// scalar aggregation; see ScalarAggregateKernel below for each stage's role.
using ScalarAggregateConsume = Status (*)(KernelContext*, const ExecSpan&);
using ScalarAggregateMerge = Status (*)(KernelContext*, KernelState&&, KernelState*);
// Finalize returns Datum to permit multiple return values
using ScalarAggregateFinalize = Status (*)(KernelContext*, Datum*);

/// \brief Kernel data structure for implementations of
/// ScalarAggregateFunction. The four necessary components of an aggregation
/// kernel are the init, consume, merge, and finalize functions.
///
/// * init: creates a new KernelState for a kernel.
/// * consume: processes an ExecSpan and updates the KernelState found in the
///   KernelContext.
/// * merge: combines one KernelState with another.
/// * finalize: produces the end result of the aggregation using the
///   KernelState in the KernelContext.
struct ScalarAggregateKernel : public Kernel {
  ScalarAggregateKernel() = default;

  ScalarAggregateKernel(std::shared_ptr<KernelSignature> sig, KernelInit init,
                        ScalarAggregateConsume consume, ScalarAggregateMerge merge,
                        ScalarAggregateFinalize finalize)
      : Kernel(std::move(sig), std::move(init)),
        consume(consume),
        merge(merge),
        finalize(finalize) {}

  // Convenience constructor that builds the KernelSignature from the input
  // and output types.
  ScalarAggregateKernel(std::vector<InputType> in_types, OutputType out_type,
                        KernelInit init, ScalarAggregateConsume consume,
                        ScalarAggregateMerge merge, ScalarAggregateFinalize finalize)
      : ScalarAggregateKernel(
            KernelSignature::Make(std::move(in_types), std::move(out_type)),
            std::move(init), consume, merge, finalize) {}

  /// \brief Merge a vector of KernelStates into a single KernelState.
  /// The merged state will be returned and will be set on the KernelContext.
  static Result<std::unique_ptr<KernelState>> MergeAll(
      const ScalarAggregateKernel* kernel, KernelContext* ctx,
      std::vector<std::unique_ptr<KernelState>> states);

  // The per-stage implementation functions described above.
  ScalarAggregateConsume consume;
  ScalarAggregateMerge merge;
  ScalarAggregateFinalize finalize;
};
|
||||
|
||||
// ----------------------------------------------------------------------
|
||||
// HashAggregateKernel (for HashAggregateFunction)
|
||||
|
||||
// Function-pointer types implementing the resize/consume/merge/finalize stages
// of a grouped (hash) aggregation; see HashAggregateKernel below.
using HashAggregateResize = Status (*)(KernelContext*, int64_t);
using HashAggregateConsume = Status (*)(KernelContext*, const ExecSpan&);
using HashAggregateMerge = Status (*)(KernelContext*, KernelState&&, const ArrayData&);

// Finalize returns Datum to permit multiple return values
using HashAggregateFinalize = Status (*)(KernelContext*, Datum*);

/// \brief Kernel data structure for implementations of
/// HashAggregateFunction. The four necessary components of an aggregation
/// kernel are the init, consume, merge, and finalize functions.
///
/// * init: creates a new KernelState for a kernel.
/// * resize: ensure that the KernelState can accommodate the specified number of groups.
/// * consume: processes an ExecSpan (which includes the argument as well
///   as an array of group identifiers) and updates the KernelState found in the
///   KernelContext.
/// * merge: combines one KernelState with another.
/// * finalize: produces the end result of the aggregation using the
///   KernelState in the KernelContext.
struct HashAggregateKernel : public Kernel {
  HashAggregateKernel() = default;

  HashAggregateKernel(std::shared_ptr<KernelSignature> sig, KernelInit init,
                      HashAggregateResize resize, HashAggregateConsume consume,
                      HashAggregateMerge merge, HashAggregateFinalize finalize)
      : Kernel(std::move(sig), std::move(init)),
        resize(resize),
        consume(consume),
        merge(merge),
        finalize(finalize) {}

  // NOTE(review): this convenience constructor takes `consume` BEFORE
  // `resize`, the opposite of the primary constructor's parameter order; the
  // delegating call below maps each argument to the correct parameter, but
  // callers passing positionally should double-check the order.
  HashAggregateKernel(std::vector<InputType> in_types, OutputType out_type,
                      KernelInit init, HashAggregateConsume consume,
                      HashAggregateResize resize, HashAggregateMerge merge,
                      HashAggregateFinalize finalize)
      : HashAggregateKernel(
            KernelSignature::Make(std::move(in_types), std::move(out_type)),
            std::move(init), resize, consume, merge, finalize) {}

  // The per-stage implementation functions described above.
  HashAggregateResize resize;
  HashAggregateConsume consume;
  HashAggregateMerge merge;
  HashAggregateFinalize finalize;
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,425 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
#include "arrow/array.h"
|
||||
#include "arrow/compute/exec.h"
|
||||
#include "arrow/compute/exec/util.h"
|
||||
#include "arrow/type.h"
|
||||
#include "arrow/util/cpu_info.h"
|
||||
#include "arrow/util/logging.h"
|
||||
|
||||
/// This file contains lightweight containers for Arrow buffers. These containers
|
||||
/// makes compromises in terms of strong ownership and the range of data types supported
|
||||
/// in order to gain performance and reduced overhead.
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
/// \brief Context needed by various execution engine operations
|
||||
///
|
||||
/// In the execution engine this context is provided by either the node or the
|
||||
/// plan and the context exists for the lifetime of the plan. Defining this here
|
||||
/// allows us to take advantage of these resources without coupling the logic with
|
||||
/// the execution engine.
|
||||
struct LightContext {
|
||||
bool has_avx2() const { return (hardware_flags & arrow::internal::CpuInfo::AVX2) > 0; }
|
||||
int64_t hardware_flags;
|
||||
util::TempVectorStack* stack;
|
||||
};
|
||||
|
||||
/// \brief Description of the layout of a "key" column
///
/// A "key" column is a non-nested, non-union column.
/// Every key column has either 0 (null), 2 (e.g. int32) or 3 (e.g. string) buffers
/// and no children.
///
/// This metadata object is a zero-allocation analogue of arrow::DataType
struct ARROW_EXPORT KeyColumnMetadata {
  /// \brief Create uninitialized metadata
  ///
  /// Members are left indeterminate; this type is deliberately trivial
  /// (presumably for performance in hot paths — zero-allocation by design).
  KeyColumnMetadata() = default;
  /// \brief Create metadata describing a fixed- or variable-length column
  KeyColumnMetadata(bool is_fixed_length_in, uint32_t fixed_length_in,
                    bool is_null_type_in = false)
      : is_fixed_length(is_fixed_length_in),
        is_null_type(is_null_type_in),
        fixed_length(fixed_length_in) {}
  /// \brief True if the column is not a varying-length binary type
  ///
  /// If this is true the column will have a validity buffer and
  /// a data buffer and the third buffer will be unused.
  bool is_fixed_length;
  /// \brief True if this column is the null type
  bool is_null_type;
  /// \brief The number of bytes for each item
  ///
  /// Zero has a special meaning, indicating a bit vector with one bit per value if it
  /// isn't a null type column.
  ///
  /// For a varying-length binary column this represents the number of bytes per offset.
  uint32_t fixed_length;
};
|
||||
|
||||
/// \brief A lightweight view into a "key" array
|
||||
///
|
||||
/// A "key" column is a non-nested, non-union column \see KeyColumnMetadata
|
||||
///
|
||||
/// This metadata object is a zero-allocation analogue of arrow::ArrayData
|
||||
class ARROW_EXPORT KeyColumnArray {
|
||||
public:
|
||||
/// \brief Create an uninitialized KeyColumnArray
|
||||
KeyColumnArray() = default;
|
||||
/// \brief Create a read-only view from buffers
|
||||
///
|
||||
/// This is a view only and does not take ownership of the buffers. The lifetime
|
||||
/// of the buffers must exceed the lifetime of this view
|
||||
KeyColumnArray(const KeyColumnMetadata& metadata, int64_t length,
|
||||
const uint8_t* validity_buffer, const uint8_t* fixed_length_buffer,
|
||||
const uint8_t* var_length_buffer, int bit_offset_validity = 0,
|
||||
int bit_offset_fixed = 0);
|
||||
/// \brief Create a mutable view from buffers
|
||||
///
|
||||
/// This is a view only and does not take ownership of the buffers. The lifetime
|
||||
/// of the buffers must exceed the lifetime of this view
|
||||
KeyColumnArray(const KeyColumnMetadata& metadata, int64_t length,
|
||||
uint8_t* validity_buffer, uint8_t* fixed_length_buffer,
|
||||
uint8_t* var_length_buffer, int bit_offset_validity = 0,
|
||||
int bit_offset_fixed = 0);
|
||||
/// \brief Create a sliced view of `this`
|
||||
///
|
||||
/// The number of rows used in offset must be divisible by 8
|
||||
/// in order to not split bit vectors within a single byte.
|
||||
KeyColumnArray Slice(int64_t offset, int64_t length) const;
|
||||
/// \brief Create a copy of `this` with a buffer from `other`
|
||||
///
|
||||
/// The copy will be identical to `this` except the buffer at buffer_id_to_replace
|
||||
/// will be replaced by the corresponding buffer in `other`.
|
||||
KeyColumnArray WithBufferFrom(const KeyColumnArray& other,
|
||||
int buffer_id_to_replace) const;
|
||||
|
||||
/// \brief Create a copy of `this` with new metadata
|
||||
KeyColumnArray WithMetadata(const KeyColumnMetadata& metadata) const;
|
||||
|
||||
// Constants used for accessing buffers using data() and mutable_data().
|
||||
static constexpr int kValidityBuffer = 0;
|
||||
static constexpr int kFixedLengthBuffer = 1;
|
||||
static constexpr int kVariableLengthBuffer = 2;
|
||||
|
||||
/// \brief Return one of the underlying mutable buffers
|
||||
uint8_t* mutable_data(int i) {
|
||||
ARROW_DCHECK(i >= 0 && i <= kMaxBuffers);
|
||||
return mutable_buffers_[i];
|
||||
}
|
||||
/// \brief Return one of the underlying read-only buffers
|
||||
const uint8_t* data(int i) const {
|
||||
ARROW_DCHECK(i >= 0 && i <= kMaxBuffers);
|
||||
return buffers_[i];
|
||||
}
|
||||
/// \brief Return a mutable version of the offsets buffer
|
||||
///
|
||||
/// Only valid if this is a view into a varbinary type
|
||||
uint32_t* mutable_offsets() {
|
||||
DCHECK(!metadata_.is_fixed_length);
|
||||
DCHECK_EQ(metadata_.fixed_length, sizeof(uint32_t));
|
||||
return reinterpret_cast<uint32_t*>(mutable_data(kFixedLengthBuffer));
|
||||
}
|
||||
/// \brief Return a read-only version of the offsets buffer
|
||||
///
|
||||
/// Only valid if this is a view into a varbinary type
|
||||
const uint32_t* offsets() const {
|
||||
DCHECK(!metadata_.is_fixed_length);
|
||||
DCHECK_EQ(metadata_.fixed_length, sizeof(uint32_t));
|
||||
return reinterpret_cast<const uint32_t*>(data(kFixedLengthBuffer));
|
||||
}
|
||||
/// \brief Return a mutable version of the large-offsets buffer
|
||||
///
|
||||
/// Only valid if this is a view into a large varbinary type
|
||||
uint64_t* mutable_large_offsets() {
|
||||
DCHECK(!metadata_.is_fixed_length);
|
||||
DCHECK_EQ(metadata_.fixed_length, sizeof(uint64_t));
|
||||
return reinterpret_cast<uint64_t*>(mutable_data(kFixedLengthBuffer));
|
||||
}
|
||||
/// \brief Return a read-only version of the large-offsets buffer
|
||||
///
|
||||
/// Only valid if this is a view into a large varbinary type
|
||||
const uint64_t* large_offsets() const {
|
||||
DCHECK(!metadata_.is_fixed_length);
|
||||
DCHECK_EQ(metadata_.fixed_length, sizeof(uint64_t));
|
||||
return reinterpret_cast<const uint64_t*>(data(kFixedLengthBuffer));
|
||||
}
|
||||
/// \brief Return the type metadata
|
||||
const KeyColumnMetadata& metadata() const { return metadata_; }
|
||||
/// \brief Return the length (in rows) of the array
|
||||
int64_t length() const { return length_; }
|
||||
/// \brief Return the bit offset into the corresponding vector
|
||||
///
|
||||
/// if i == 1 then this must be a bool array
|
||||
int bit_offset(int i) const {
|
||||
ARROW_DCHECK(i >= 0 && i < kMaxBuffers);
|
||||
return bit_offset_[i];
|
||||
}
|
||||
|
||||
private:
|
||||
static constexpr int kMaxBuffers = 3;
|
||||
const uint8_t* buffers_[kMaxBuffers];
|
||||
uint8_t* mutable_buffers_[kMaxBuffers];
|
||||
KeyColumnMetadata metadata_;
|
||||
int64_t length_;
|
||||
// Starting bit offset within the first byte (between 0 and 7)
|
||||
// to be used when accessing buffers that store bit vectors.
|
||||
int bit_offset_[kMaxBuffers - 1];
|
||||
};
|
||||
|
||||
/// \brief Create KeyColumnMetadata from a DataType
///
/// If `type` is a dictionary type then this will return the KeyColumnMetadata for
/// the indices type
///
/// This should only be called on "key" columns. Calling this with
/// a non-key column will return Status::TypeError.
ARROW_EXPORT Result<KeyColumnMetadata> ColumnMetadataFromDataType(
    const std::shared_ptr<DataType>& type);

/// \brief Create KeyColumnArray from ArrayData
///
/// If `type` is a dictionary type then this will return the KeyColumnArray for
/// the indices array
///
/// The view covers rows [start_row, start_row + num_rows) of `array_data`.
///
/// The caller should ensure this is only called on "key" columns.
/// \see ColumnMetadataFromDataType for details
ARROW_EXPORT Result<KeyColumnArray> ColumnArrayFromArrayData(
    const std::shared_ptr<ArrayData>& array_data, int64_t start_row, int64_t num_rows);

/// \brief Create KeyColumnArray from ArrayData and KeyColumnMetadata
///
/// If `type` is a dictionary type then this will return the KeyColumnArray for
/// the indices array
///
/// Unlike ColumnArrayFromArrayData this cannot fail: the caller supplies an
/// already-validated `metadata` instead of deriving one from the type.
///
/// The caller should ensure this is only called on "key" columns.
/// \see ColumnMetadataFromDataType for details
ARROW_EXPORT KeyColumnArray ColumnArrayFromArrayDataAndMetadata(
    const std::shared_ptr<ArrayData>& array_data, const KeyColumnMetadata& metadata,
    int64_t start_row, int64_t num_rows);

/// \brief Create KeyColumnMetadata instances from an ExecBatch
///
/// column_metadatas (an output parameter) will be resized to fit
///
/// All columns in `batch` must be eligible "key" columns and have an array shape
/// \see ColumnMetadataFromDataType for more details
ARROW_EXPORT Status ColumnMetadatasFromExecBatch(
    const ExecBatch& batch, std::vector<KeyColumnMetadata>* column_metadatas);

/// \brief Create KeyColumnArray instances from a slice of an ExecBatch
///
/// column_arrays (an output parameter) will be resized to fit
///
/// All columns in `batch` must be eligible "key" columns and have an array shape
/// \see ColumnArrayFromArrayData for more details
ARROW_EXPORT Status ColumnArraysFromExecBatch(const ExecBatch& batch, int64_t start_row,
                                              int64_t num_rows,
                                              std::vector<KeyColumnArray>* column_arrays);

/// \brief Create KeyColumnArray instances from an ExecBatch
///
/// column_arrays (an output parameter) will be resized to fit
///
/// All columns in `batch` must be eligible "key" columns and have an array shape
/// \see ColumnArrayFromArrayData for more details
ARROW_EXPORT Status ColumnArraysFromExecBatch(const ExecBatch& batch,
                                              std::vector<KeyColumnArray>* column_arrays);
|
||||
|
||||
/// A lightweight resizable array for "key" columns
///
/// Unlike KeyColumnArray this instance owns its buffers
///
/// Resizing is handled by arrow::ResizableBuffer and a doubling approach is
/// used so that resizes will always grow up to the next power of 2
class ARROW_EXPORT ResizableArrayData {
 public:
  /// \brief Create an uninitialized instance
  ///
  /// Init must be called before calling any other operations
  ResizableArrayData()
      : log_num_rows_min_(0),
        pool_(NULLPTR),
        num_rows_(0),
        num_rows_allocated_(0),
        var_len_buf_size_(0) {}

  // Releases all owned buffers (Clear(true) frees allocated memory).
  ~ResizableArrayData() { Clear(true); }

  /// \brief Initialize the array
  /// \param data_type The data type this array is holding data for.
  /// \param pool The pool to make allocations on
  /// \param log_num_rows_min All resize operations will allocate at least enough
  ///                         space for (1 << log_num_rows_min) rows
  void Init(const std::shared_ptr<DataType>& data_type, MemoryPool* pool,
            int log_num_rows_min);

  /// \brief Resets the array back to an empty state
  /// \param release_buffers If true then allocated memory is released and the
  ///                        next resize operation will have to reallocate memory
  void Clear(bool release_buffers);

  /// \brief Resize the fixed length buffers
  ///
  /// The buffers will be resized to hold at least `num_rows_new` rows of data
  Status ResizeFixedLengthBuffers(int num_rows_new);

  /// \brief Resize the varying length buffer if this array is a variable binary type
  ///
  /// This must be called after offsets have been populated and the buffer will be
  /// resized to hold at least as much data as the offsets require
  ///
  /// Does nothing if the array is not a variable binary type
  Status ResizeVaryingLengthBuffer();

  /// \brief The current length (in rows) of the array
  int num_rows() const { return num_rows_; }

  /// \brief A non-owning view into this array
  ///
  /// The view is only valid while this instance is alive and not resized.
  KeyColumnArray column_array() const;

  /// \brief A lightweight descriptor of the data held by this array
  Result<KeyColumnMetadata> column_metadata() const {
    return ColumnMetadataFromDataType(data_type_);
  }

  /// \brief Convert the data to an arrow::ArrayData
  ///
  /// This is a zero copy operation and the created ArrayData will reference the
  /// buffers held by this instance.
  std::shared_ptr<ArrayData> array_data() const;

  // Constants used for accessing buffers using mutable_data().
  static constexpr int kValidityBuffer = 0;
  static constexpr int kFixedLengthBuffer = 1;
  static constexpr int kVariableLengthBuffer = 2;

  /// \brief A raw pointer to the requested buffer
  ///
  /// If i is 0 (kValidityBuffer) then this returns the validity buffer
  /// If i is 1 (kFixedLengthBuffer) then this returns the buffer used for values (if this
  /// is a fixed
  /// length data type) or offsets (if this is a variable binary type)
  /// If i is 2 (kVariableLengthBuffer) then this returns the buffer used for variable
  /// length binary data
  ///
  /// NOTE(review): no bounds checking here — callers must pass one of the
  /// three constants above and only call after Init().
  uint8_t* mutable_data(int i) { return buffers_[i]->mutable_data(); }

 private:
  // Extra bytes per allocation — presumably to make SIMD over-reads past the
  // logical end safe; TODO confirm against the implementation file.
  static constexpr int64_t kNumPaddingBytes = 64;
  int log_num_rows_min_;
  std::shared_ptr<DataType> data_type_;
  MemoryPool* pool_;
  int num_rows_;
  int num_rows_allocated_;
  int var_len_buf_size_;
  static constexpr int kMaxBuffers = 3;
  std::shared_ptr<ResizableBuffer> buffers_[kMaxBuffers];
};
|
||||
|
||||
/// \brief A builder to concatenate batches of data into a larger batch
///
/// Will only store num_rows_max() rows
class ARROW_EXPORT ExecBatchBuilder {
 public:
  /// \brief Add rows from `source` into `target` column
  ///
  /// If `target` is uninitialized or cleared it will be initialized to use
  /// the given pool.
  static Status AppendSelected(const std::shared_ptr<ArrayData>& source,
                               ResizableArrayData* target, int num_rows_to_append,
                               const uint16_t* row_ids, MemoryPool* pool);

  /// \brief Add nulls into `target` column
  ///
  /// If `target` is uninitialized or cleared it will be initialized to use
  /// the given pool.
  static Status AppendNulls(const std::shared_ptr<DataType>& type,
                            ResizableArrayData& target, int num_rows_to_append,
                            MemoryPool* pool);

  /// \brief Add selected rows from `batch`
  ///
  /// If `col_ids` is null then `num_cols` should be less than batch.num_values() and
  /// the first `num_cols` columns of batch will be appended.
  ///
  /// All columns in `batch` must have array shape
  Status AppendSelected(MemoryPool* pool, const ExecBatch& batch, int num_rows_to_append,
                        const uint16_t* row_ids, int num_cols,
                        const int* col_ids = NULLPTR);

  /// \brief Add all-null rows
  Status AppendNulls(MemoryPool* pool,
                     const std::vector<std::shared_ptr<DataType>>& types,
                     int num_rows_to_append);

  /// \brief Create an ExecBatch with the data that has been appended so far
  /// and clear this builder to be used again
  ///
  /// Should only be called if num_rows() returns non-zero.
  ExecBatch Flush();

  /// \brief Number of rows appended so far (0 if nothing has been appended)
  int num_rows() const { return values_.empty() ? 0 : values_[0].num_rows(); }

  /// \brief Maximum number of rows this builder can hold (2^kLogNumRows)
  static int num_rows_max() { return 1 << kLogNumRows; }

 private:
  static constexpr int kLogNumRows = 15;

  // Calculate how many rows to skip from the tail of the
  // sequence of selected rows, such that the total size of skipped rows is at
  // least equal to the size specified by the caller.
  //
  // Skipping of the tail rows
  // is used to allow for faster processing by the caller of remaining rows
  // without checking buffer bounds (useful with SIMD or fixed size memory loads
  // and stores).
  //
  // The sequence of row_ids provided must be non-decreasing.
  //
  static int NumRowsToSkip(const std::shared_ptr<ArrayData>& column, int num_rows,
                           const uint16_t* row_ids, int num_tail_bytes_to_skip);

  // The supplied lambda will be called for each row in the given list of rows.
  // The arguments given to it will be:
  // - index of a row (within the set of selected rows),
  // - pointer to the value,
  // - byte length of the value.
  //
  // The information about nulls (validity bitmap) is not used in this call and
  // has to be processed separately.
  //
  template <class PROCESS_VALUE_FN>
  static void Visit(const std::shared_ptr<ArrayData>& column, int num_rows,
                    const uint16_t* row_ids, PROCESS_VALUE_FN process_value_fn);

  // Gather the bits selected by row_ids from input_bits into a dense output
  // bit vector; the OUTPUT_BYTE_ALIGNED parameter selects the byte-aligned
  // fast path.
  template <bool OUTPUT_BYTE_ALIGNED>
  static void CollectBitsImp(const uint8_t* input_bits, int64_t input_bits_offset,
                             uint8_t* output_bits, int64_t output_bits_offset,
                             int num_rows, const uint16_t* row_ids);
  static void CollectBits(const uint8_t* input_bits, int64_t input_bits_offset,
                          uint8_t* output_bits, int64_t output_bits_offset, int num_rows,
                          const uint16_t* row_ids);

  // One owned, resizable column per appended field.
  std::vector<ResizableArrayData> values_;
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,124 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// NOTE: API is EXPERIMENTAL and will change without going through a
|
||||
// deprecation cycle
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/status.h"
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
class Function;
|
||||
class FunctionOptionsType;
|
||||
|
||||
/// \brief A mutable central function registry for built-in functions as well
/// as user-defined functions. Functions are implementations of
/// arrow::compute::Function.
///
/// Generally, each function contains kernels which are implementations of a
/// function for a specific argument signature. After looking up a function in
/// the registry, one can either execute it eagerly with Function::Execute or
/// use one of the function's dispatch methods to pick a suitable kernel for
/// lower-level function execution.
class ARROW_EXPORT FunctionRegistry {
 public:
  ~FunctionRegistry();

  /// \brief Construct a new registry.
  ///
  /// Most users only need to use the global registry.
  static std::unique_ptr<FunctionRegistry> Make();

  /// \brief Construct a new nested registry with the given parent.
  ///
  /// Most users only need to use the global registry. The returned registry never changes
  /// its parent, even when an operation allows overwriting.
  static std::unique_ptr<FunctionRegistry> Make(FunctionRegistry* parent);

  /// \brief Check whether a new function can be added to the registry.
  ///
  /// \returns Status::KeyError if a function with the same name is already registered.
  Status CanAddFunction(std::shared_ptr<Function> function, bool allow_overwrite = false);

  /// \brief Add a new function to the registry.
  ///
  /// \returns Status::KeyError if a function with the same name is already registered.
  Status AddFunction(std::shared_ptr<Function> function, bool allow_overwrite = false);

  /// \brief Check whether an alias can be added for the given function name.
  ///
  /// \returns Status::KeyError if the function with the given name is not registered.
  Status CanAddAlias(const std::string& target_name, const std::string& source_name);

  /// \brief Add alias for the given function name.
  ///
  /// \returns Status::KeyError if the function with the given name is not registered.
  Status AddAlias(const std::string& target_name, const std::string& source_name);

  /// \brief Check whether a new function options type can be added to the registry.
  ///
  /// \returns Status::KeyError if a function options type with the same name is already
  /// registered.
  Status CanAddFunctionOptionsType(const FunctionOptionsType* options_type,
                                   bool allow_overwrite = false);

  /// \brief Add a new function options type to the registry.
  ///
  /// \returns Status::KeyError if a function options type with the same name is already
  /// registered.
  Status AddFunctionOptionsType(const FunctionOptionsType* options_type,
                                bool allow_overwrite = false);

  /// \brief Retrieve a function by name from the registry.
  Result<std::shared_ptr<Function>> GetFunction(const std::string& name) const;

  /// \brief Return vector of all entry names in the registry.
  ///
  /// Helpful for displaying a manifest of available functions.
  std::vector<std::string> GetFunctionNames() const;

  /// \brief Retrieve a function options type by name from the registry.
  Result<const FunctionOptionsType*> GetFunctionOptionsType(
      const std::string& name) const;

  /// \brief The number of currently registered functions.
  int num_functions() const;

 private:
  FunctionRegistry();

  // Use PIMPL pattern to not have std::unordered_map here
  class FunctionRegistryImpl;
  std::unique_ptr<FunctionRegistryImpl> impl_;

  explicit FunctionRegistry(FunctionRegistryImpl* impl);
};
|
||||
|
||||
/// \brief Return the process-global function registry.
///
/// NOTE(review): singleton accessor — ownership remains with the library;
/// callers must not delete the returned pointer.
ARROW_EXPORT FunctionRegistry* GetFunctionRegistry();
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,112 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "arrow/compute/exec.h"
|
||||
#include "arrow/compute/exec/options.h"
|
||||
#include "arrow/compute/kernel.h"
|
||||
#include "arrow/datum.h"
|
||||
#include "arrow/result.h"
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {
|
||||
namespace compute {
|
||||
|
||||
/// Consumes batches of keys and yields batches of the group ids.
class ARROW_EXPORT Grouper {
 public:
  virtual ~Grouper() = default;

  /// Construct a Grouper which receives the specified key types
  static Result<std::unique_ptr<Grouper>> Make(const std::vector<TypeHolder>& key_types,
                                               ExecContext* ctx = default_exec_context());

  /// Consume a batch of keys, producing the corresponding group ids as an integer array.
  /// Currently only uint32 indices will be produced, eventually the bit width will only
  /// be as wide as necessary.
  virtual Result<Datum> Consume(const ExecSpan& batch) = 0;

  /// Get current unique keys. May be called multiple times.
  virtual Result<ExecBatch> GetUniques() = 0;

  /// Get the current number of groups.
  virtual uint32_t num_groups() const = 0;

  /// \brief Assemble lists of indices of identical elements.
  ///
  /// \param[in] ids An unsigned, all-valid integral array which will be
  /// used as grouping criteria.
  /// \param[in] num_groups An upper bound for the elements of ids
  /// \param[in] ctx Execution context to use during the operation
  /// \return A num_groups-long ListArray where the slot at i contains a
  /// list of indices where i appears in ids.
  ///
  /// Example (num_groups == 8, so the result has 8 slots):
  /// MakeGroupings([
  ///   2,
  ///   2,
  ///   5,
  ///   5,
  ///   2,
  ///   3
  /// ], 8) == [
  ///   [],
  ///   [],
  ///   [0, 1, 4],
  ///   [5],
  ///   [],
  ///   [2, 3],
  ///   [],
  ///   []
  /// ]
  static Result<std::shared_ptr<ListArray>> MakeGroupings(
      const UInt32Array& ids, uint32_t num_groups,
      ExecContext* ctx = default_exec_context());

  /// \brief Produce a ListArray whose slots are selections of `array` which correspond to
  /// the provided groupings.
  ///
  /// For example,
  /// ApplyGroupings([
  ///   [],
  ///   [],
  ///   [0, 1, 4],
  ///   [5],
  ///   [],
  ///   [2, 3],
  ///   [],
  ///   []
  /// ], [2, 2, 5, 5, 2, 3]) == [
  ///   [],
  ///   [],
  ///   [2, 2, 2],
  ///   [3],
  ///   [],
  ///   [5, 5],
  ///   [],
  ///   []
  /// ]
  static Result<std::shared_ptr<ListArray>> ApplyGroupings(
      const ListArray& groupings, const Array& array,
      ExecContext* ctx = default_exec_context());
};
|
||||
|
||||
} // namespace compute
|
||||
} // namespace arrow
|
||||
@@ -0,0 +1,62 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "arrow/util/visibility.h"
|
||||
|
||||
namespace arrow {

// Forward declarations so that headers depending only on these names need not
// include their full definitions.
struct Datum;
struct TypeHolder;

namespace compute {

// Function lookup and execution machinery.
class Function;
class FunctionExecutor;
class FunctionOptions;
class FunctionRegistry;

class CastOptions;

// Eager execution primitives.
struct ExecBatch;
class ExecContext;
class KernelContext;

// Kernel data structures.
struct Kernel;
struct ScalarKernel;
struct ScalarAggregateKernel;
struct VectorKernel;

struct KernelState;

// Execution-plan (streaming engine) types.
struct Declaration;
class Expression;
class ExecNode;
class ExecPlan;
class ExecNodeOptions;
class ExecFactoryRegistry;
class QueryContext;
struct QueryOptions;

class SinkNodeConsumer;

/// \brief Return the default (non-owned) ExecContext
ARROW_EXPORT ExecContext* default_exec_context();
ARROW_EXPORT ExecContext* threaded_exec_context();

}  // namespace compute
}  // namespace arrow
|
||||
Reference in New Issue
Block a user