diff --git a/include/tensorflow/README.md b/include/tensorflow/README.md new file mode 100644 index 00000000000..33e88b2289e --- /dev/null +++ b/include/tensorflow/README.md @@ -0,0 +1,15 @@ +These headers have been copied from TensorFlow 2.13.0. + +To update the files to those in newer versions of TensorFlow: + +cd $TENSORFLOW_CHECKOUT +cp --parents tensorflow/lite/builtin_ops.h $MESA_DIR/include/. +cp --parents tensorflow/lite/c/common.h $MESA_DIR/include/. +cp --parents tensorflow/lite/c/c_api.h $MESA_DIR/include/. +cp --parents tensorflow/lite/core/async/c/types.h $MESA_DIR/include/. +cp --parents tensorflow/lite/core/c/builtin_op_data.h $MESA_DIR/include/. +cp --parents tensorflow/lite/core/c/c_api.h $MESA_DIR/include/. +cp --parents tensorflow/lite/core/c/c_api_experimental.h $MESA_DIR/include/. +cp --parents tensorflow/lite/core/c/c_api_opaque.h $MESA_DIR/include/. +cp --parents tensorflow/lite/core/c/c_api_types.h $MESA_DIR/include/. +cp --parents tensorflow/lite/core/c/common.h $MESA_DIR/include/. diff --git a/include/tensorflow/lite/builtin_ops.h b/include/tensorflow/lite/builtin_ops.h new file mode 100644 index 00000000000..f9871add248 --- /dev/null +++ b/include/tensorflow/lite/builtin_ops.h @@ -0,0 +1,197 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_BUILTIN_OPS_H_ +#define TENSORFLOW_LITE_BUILTIN_OPS_H_ + +// DO NOT EDIT MANUALLY: This file is automatically generated by +// `schema/builtin_ops_header/generator.cc`. + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// The enum for builtin operators. +// Note: CUSTOM, DELEGATE, and PLACEHOLDER_FOR_GREATER_OP_CODES are 3 special +// ops which are not real built-in ops. +typedef enum { + kTfLiteBuiltinAdd = 0, + kTfLiteBuiltinAveragePool2d = 1, + kTfLiteBuiltinConcatenation = 2, + kTfLiteBuiltinConv2d = 3, + kTfLiteBuiltinDepthwiseConv2d = 4, + kTfLiteBuiltinDepthToSpace = 5, + kTfLiteBuiltinDequantize = 6, + kTfLiteBuiltinEmbeddingLookup = 7, + kTfLiteBuiltinFloor = 8, + kTfLiteBuiltinFullyConnected = 9, + kTfLiteBuiltinHashtableLookup = 10, + kTfLiteBuiltinL2Normalization = 11, + kTfLiteBuiltinL2Pool2d = 12, + kTfLiteBuiltinLocalResponseNormalization = 13, + kTfLiteBuiltinLogistic = 14, + kTfLiteBuiltinLshProjection = 15, + kTfLiteBuiltinLstm = 16, + kTfLiteBuiltinMaxPool2d = 17, + kTfLiteBuiltinMul = 18, + kTfLiteBuiltinRelu = 19, + kTfLiteBuiltinReluN1To1 = 20, + kTfLiteBuiltinRelu6 = 21, + kTfLiteBuiltinReshape = 22, + kTfLiteBuiltinResizeBilinear = 23, + kTfLiteBuiltinRnn = 24, + kTfLiteBuiltinSoftmax = 25, + kTfLiteBuiltinSpaceToDepth = 26, + kTfLiteBuiltinSvdf = 27, + kTfLiteBuiltinTanh = 28, + kTfLiteBuiltinConcatEmbeddings = 29, + kTfLiteBuiltinSkipGram = 30, + kTfLiteBuiltinCall = 31, + kTfLiteBuiltinCustom = 32, + kTfLiteBuiltinEmbeddingLookupSparse = 33, + kTfLiteBuiltinPad = 34, + kTfLiteBuiltinUnidirectionalSequenceRnn = 35, + kTfLiteBuiltinGather = 36, + kTfLiteBuiltinBatchToSpaceNd = 37, + kTfLiteBuiltinSpaceToBatchNd = 38, + kTfLiteBuiltinTranspose = 39, + kTfLiteBuiltinMean = 40, + kTfLiteBuiltinSub = 41, + kTfLiteBuiltinDiv = 42, + kTfLiteBuiltinSqueeze = 43, + kTfLiteBuiltinUnidirectionalSequenceLstm = 44, + 
kTfLiteBuiltinStridedSlice = 45, + kTfLiteBuiltinBidirectionalSequenceRnn = 46, + kTfLiteBuiltinExp = 47, + kTfLiteBuiltinTopkV2 = 48, + kTfLiteBuiltinSplit = 49, + kTfLiteBuiltinLogSoftmax = 50, + kTfLiteBuiltinDelegate = 51, + kTfLiteBuiltinBidirectionalSequenceLstm = 52, + kTfLiteBuiltinCast = 53, + kTfLiteBuiltinPrelu = 54, + kTfLiteBuiltinMaximum = 55, + kTfLiteBuiltinArgMax = 56, + kTfLiteBuiltinMinimum = 57, + kTfLiteBuiltinLess = 58, + kTfLiteBuiltinNeg = 59, + kTfLiteBuiltinPadv2 = 60, + kTfLiteBuiltinGreater = 61, + kTfLiteBuiltinGreaterEqual = 62, + kTfLiteBuiltinLessEqual = 63, + kTfLiteBuiltinSelect = 64, + kTfLiteBuiltinSlice = 65, + kTfLiteBuiltinSin = 66, + kTfLiteBuiltinTransposeConv = 67, + kTfLiteBuiltinSparseToDense = 68, + kTfLiteBuiltinTile = 69, + kTfLiteBuiltinExpandDims = 70, + kTfLiteBuiltinEqual = 71, + kTfLiteBuiltinNotEqual = 72, + kTfLiteBuiltinLog = 73, + kTfLiteBuiltinSum = 74, + kTfLiteBuiltinSqrt = 75, + kTfLiteBuiltinRsqrt = 76, + kTfLiteBuiltinShape = 77, + kTfLiteBuiltinPow = 78, + kTfLiteBuiltinArgMin = 79, + kTfLiteBuiltinFakeQuant = 80, + kTfLiteBuiltinReduceProd = 81, + kTfLiteBuiltinReduceMax = 82, + kTfLiteBuiltinPack = 83, + kTfLiteBuiltinLogicalOr = 84, + kTfLiteBuiltinOneHot = 85, + kTfLiteBuiltinLogicalAnd = 86, + kTfLiteBuiltinLogicalNot = 87, + kTfLiteBuiltinUnpack = 88, + kTfLiteBuiltinReduceMin = 89, + kTfLiteBuiltinFloorDiv = 90, + kTfLiteBuiltinReduceAny = 91, + kTfLiteBuiltinSquare = 92, + kTfLiteBuiltinZerosLike = 93, + kTfLiteBuiltinFill = 94, + kTfLiteBuiltinFloorMod = 95, + kTfLiteBuiltinRange = 96, + kTfLiteBuiltinResizeNearestNeighbor = 97, + kTfLiteBuiltinLeakyRelu = 98, + kTfLiteBuiltinSquaredDifference = 99, + kTfLiteBuiltinMirrorPad = 100, + kTfLiteBuiltinAbs = 101, + kTfLiteBuiltinSplitV = 102, + kTfLiteBuiltinUnique = 103, + kTfLiteBuiltinCeil = 104, + kTfLiteBuiltinReverseV2 = 105, + kTfLiteBuiltinAddN = 106, + kTfLiteBuiltinGatherNd = 107, + kTfLiteBuiltinCos = 108, + kTfLiteBuiltinWhere = 109, + 
kTfLiteBuiltinRank = 110, + kTfLiteBuiltinElu = 111, + kTfLiteBuiltinReverseSequence = 112, + kTfLiteBuiltinMatrixDiag = 113, + kTfLiteBuiltinQuantize = 114, + kTfLiteBuiltinMatrixSetDiag = 115, + kTfLiteBuiltinRound = 116, + kTfLiteBuiltinHardSwish = 117, + kTfLiteBuiltinIf = 118, + kTfLiteBuiltinWhile = 119, + kTfLiteBuiltinNonMaxSuppressionV4 = 120, + kTfLiteBuiltinNonMaxSuppressionV5 = 121, + kTfLiteBuiltinScatterNd = 122, + kTfLiteBuiltinSelectV2 = 123, + kTfLiteBuiltinDensify = 124, + kTfLiteBuiltinSegmentSum = 125, + kTfLiteBuiltinBatchMatmul = 126, + kTfLiteBuiltinPlaceholderForGreaterOpCodes = 127, + kTfLiteBuiltinCumsum = 128, + kTfLiteBuiltinCallOnce = 129, + kTfLiteBuiltinBroadcastTo = 130, + kTfLiteBuiltinRfft2d = 131, + kTfLiteBuiltinConv3d = 132, + kTfLiteBuiltinImag = 133, + kTfLiteBuiltinReal = 134, + kTfLiteBuiltinComplexAbs = 135, + kTfLiteBuiltinHashtable = 136, + kTfLiteBuiltinHashtableFind = 137, + kTfLiteBuiltinHashtableImport = 138, + kTfLiteBuiltinHashtableSize = 139, + kTfLiteBuiltinReduceAll = 140, + kTfLiteBuiltinConv3dTranspose = 141, + kTfLiteBuiltinVarHandle = 142, + kTfLiteBuiltinReadVariable = 143, + kTfLiteBuiltinAssignVariable = 144, + kTfLiteBuiltinBroadcastArgs = 145, + kTfLiteBuiltinRandomStandardNormal = 146, + kTfLiteBuiltinBucketize = 147, + kTfLiteBuiltinRandomUniform = 148, + kTfLiteBuiltinMultinomial = 149, + kTfLiteBuiltinGelu = 150, + kTfLiteBuiltinDynamicUpdateSlice = 151, + kTfLiteBuiltinRelu0To1 = 152, + kTfLiteBuiltinUnsortedSegmentProd = 153, + kTfLiteBuiltinUnsortedSegmentMax = 154, + kTfLiteBuiltinUnsortedSegmentSum = 155, + kTfLiteBuiltinAtan2 = 156, + kTfLiteBuiltinUnsortedSegmentMin = 157, + kTfLiteBuiltinSign = 158, + kTfLiteBuiltinBitcast = 159, + kTfLiteBuiltinBitwiseXor = 160, + kTfLiteBuiltinRightShift = 161, +} TfLiteBuiltinOperator; + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus +#endif // TENSORFLOW_LITE_BUILTIN_OPS_H_ diff --git a/include/tensorflow/lite/c/c_api.h 
b/include/tensorflow/lite/c/c_api.h new file mode 100644 index 00000000000..4b09cf88440 --- /dev/null +++ b/include/tensorflow/lite/c/c_api.h @@ -0,0 +1,26 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_C_C_API_H_ +#define TENSORFLOW_LITE_C_C_API_H_ + +/// \file +/// +/// C API for TensorFlow Lite. +/// +/// For documentation, see tensorflow/lite/core/c/c_api.h + +#include "tensorflow/lite/core/c/c_api.h" + +#endif // TENSORFLOW_LITE_C_C_API_H_ diff --git a/include/tensorflow/lite/c/common.h b/include/tensorflow/lite/c/common.h new file mode 100644 index 00000000000..e3e8001cbd7 --- /dev/null +++ b/include/tensorflow/lite/c/common.h @@ -0,0 +1,41 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +// This file defines common C types and APIs for implementing operations, +// delegates and other constructs in TensorFlow Lite. The actual operations and +// delegates can be defined using C++, but the interface between the interpreter +// and the operations are C. +// +// Summary of abstractions +// TF_LITE_ENSURE - Self-sufficient error checking +// TfLiteStatus - Status reporting +// TfLiteIntArray - stores tensor shapes (dims), +// TfLiteContext - allows an op to access the tensors +// TfLiteTensor - tensor (a multidimensional array) +// TfLiteNode - a single node or operation +// TfLiteRegistration - the implementation of a conceptual operation. +// TfLiteDelegate - allows delegation of nodes to alternative backends. +// +// Some abstractions in this file are created and managed by Interpreter. +// +// NOTE: The order of values in these structs are "semi-ABI stable". New values +// should be added only to the end of structs and never reordered. + +#ifndef TENSORFLOW_LITE_C_COMMON_H_ +#define TENSORFLOW_LITE_C_COMMON_H_ + +#include "tensorflow/lite/core/c/common.h" + +#endif // TENSORFLOW_LITE_C_COMMON_H_ diff --git a/include/tensorflow/lite/core/async/c/types.h b/include/tensorflow/lite/core/async/c/types.h new file mode 100644 index 00000000000..8dabfdc2833 --- /dev/null +++ b/include/tensorflow/lite/core/async/c/types.h @@ -0,0 +1,43 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_CORE_ASYNC_C_TYPES_H_ +#define TENSORFLOW_LITE_CORE_ASYNC_C_TYPES_H_ + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +/// Opaque type for TfLiteAsyncKernel. +typedef struct TfLiteAsyncKernel TfLiteAsyncKernel; + +/// Opaque type for TfLiteExecutionTask. +/// +/// See tensorflow/lite/core/async/c/task.h +/// NOTE: TfLiteExecutionTask is NOT thread-safe. +typedef struct TfLiteExecutionTask TfLiteExecutionTask; + +/// Enum tag for specifying whether a tensor is the input or output to the +/// model. +typedef enum TfLiteIoType { + kTfLiteIoTypeUnknown = 0, + kTfLiteIoTypeInput = 1, + kTfLiteIoTypeOutput = 2, +} TfLiteIoType; + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif // TENSORFLOW_LITE_CORE_ASYNC_C_TYPES_H_ diff --git a/include/tensorflow/lite/core/c/builtin_op_data.h b/include/tensorflow/lite/core/c/builtin_op_data.h new file mode 100644 index 00000000000..26d7e9989c8 --- /dev/null +++ b/include/tensorflow/lite/core/c/builtin_op_data.h @@ -0,0 +1,537 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/
+/// WARNING: Users of TensorFlow Lite should not include this file directly,
+/// but should instead include
+/// "third_party/tensorflow/lite/c/builtin_op_data.h".
+/// Only the TensorFlow Lite implementation itself should include this
+/// file directly.
+#ifndef TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_
+#define TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_
+
+#include <stdint.h>
+
+#include "tensorflow/lite/core/c/common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif  // __cplusplus
+
+// TfLiteReshapeParams can't have dynamic data so we fix the maximum possible
+// number of dimensions.
+#define TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT 8
+
+// TODO(aselle): Consider using "if this then that" for testing.
+
+// Useful placeholder to put in otherwise empty structs to avoid size warnings.
+typedef struct {
+  char dummy;
+} EmptyStructPlaceholder;
+
+// IMPORTANT: All new members of structs must be added at the end to ensure
+// backwards compatibility.
+
+// Possible padding types (for convolutions)
+typedef enum {
+  kTfLitePaddingUnknown = 0,
+  kTfLitePaddingSame,
+  kTfLitePaddingValid,
+} TfLitePadding;
+
+typedef enum {
+  kTfLiteMirrorPaddingUnknown = 0,
+  kTfLiteMirrorPaddingReflect,
+  kTfLiteMirrorPaddingSymmetric,
+} TfLiteMirrorPaddingMode;
+
+// TODO(b/130259536): We should move this out of builtin_op_data.
+typedef struct {
+  int width;
+  int height;
+  int width_offset;
+  int height_offset;
+} TfLitePaddingValues;
+
+typedef struct {
+  TfLiteMirrorPaddingMode mode;
+} TfLiteMirrorPaddingParams;
+
+// Possible fused activation functions.
+typedef enum {
+  kTfLiteActNone = 0,
+  kTfLiteActRelu,
+  kTfLiteActReluN1To1,  // min(max(-1, x), 1)
+  kTfLiteActRelu6,      // min(max(0, x), 6)
+  kTfLiteActTanh,
+  kTfLiteActSignBit,
+  kTfLiteActSigmoid,
+} TfLiteFusedActivation;
+
+typedef struct {
+  // Parameters for CONV_2D version 1.
+ TfLitePadding padding; + int stride_width; + int stride_height; + TfLiteFusedActivation activation; + + // Parameters for CONV_2D version 2. + // Note: Version 2 supports dilation values not equal to 1. + int dilation_width_factor; + int dilation_height_factor; +} TfLiteConvParams; + +typedef struct { + TfLitePadding padding; + int stride_width; + int stride_height; + int stride_depth; + int dilation_width_factor; + int dilation_height_factor; + int dilation_depth_factor; + TfLiteFusedActivation activation; +} TfLiteConv3DParams; + +typedef TfLiteConv3DParams TfLiteConv3DTransposeParams; + +typedef struct { + TfLitePadding padding; + int stride_width; + int stride_height; + int filter_width; + int filter_height; + TfLiteFusedActivation activation; + struct { + TfLitePaddingValues padding; + } computed; +} TfLitePoolParams; + +typedef struct { + // Parameters for DepthwiseConv version 1 or above. + TfLitePadding padding; + int stride_width; + int stride_height; + // `depth_multiplier` is redundant. It's used by CPU kernels in + // TensorFlow 2.0 or below, but ignored in versions above. + // + // The information can be deduced from the shape of input and the shape of + // weights. Since the TFLiteConverter toolchain doesn't support partially + // specified shapes, relying on `depth_multiplier` stops us from supporting + // graphs with dynamic shape tensors. + // + // Note: Some of the delegates (e.g. NNAPI, GPU) are still relying on this + // field. + int depth_multiplier; + TfLiteFusedActivation activation; + // Parameters for DepthwiseConv version 2 or above. + int dilation_width_factor; + int dilation_height_factor; +} TfLiteDepthwiseConvParams; + +typedef struct { + int rank; + TfLiteFusedActivation activation; + + // Parameter for SVDF version 4. + bool asymmetric_quantize_inputs; +} TfLiteSVDFParams; + +typedef struct { + TfLiteFusedActivation activation; + + // Parameter for RNN version 3. 
+ bool asymmetric_quantize_inputs; +} TfLiteRNNParams; + +typedef struct { + bool time_major; + TfLiteFusedActivation activation; + + // Parameter for Sequence RNN version 3. + bool asymmetric_quantize_inputs; +} TfLiteSequenceRNNParams; + +typedef struct { + bool time_major; + TfLiteFusedActivation activation; + bool merge_outputs; + + // Parameter for Bidirectional RNN verison 3. + bool asymmetric_quantize_inputs; +} TfLiteBidirectionalSequenceRNNParams; + +typedef enum { + kTfLiteFullyConnectedWeightsFormatDefault = 0, + kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8 = 1, +} TfLiteFullyConnectedWeightsFormat; + +typedef struct { + // Parameters for FullyConnected version 1 or above. + TfLiteFusedActivation activation; + + // Parameters for FullyConnected version 2 or above. + TfLiteFullyConnectedWeightsFormat weights_format; + + // Parameters for FullyConnected version 5 or above. + // If set to true, then the number of dimensions in the input and the output + // tensors are the same. Furthermore, all but the last dimension of the input + // and output shapes will be equal. + bool keep_num_dims; + + // Parameters for FullyConnected version 7 or above. + // If set to true and the weights are quantized, then non constant inputs + // are quantized at evaluation time with asymmetric quantization. + bool asymmetric_quantize_inputs; +} TfLiteFullyConnectedParams; + +typedef enum { + kTfLiteLshProjectionUnknown = 0, + kTfLiteLshProjectionSparse = 1, + kTfLiteLshProjectionDense = 2, +} TfLiteLSHProjectionType; + +typedef struct { + TfLiteLSHProjectionType type; +} TfLiteLSHProjectionParams; + +typedef struct { + float beta; +} TfLiteSoftmaxParams; + +typedef struct { + int axis; + TfLiteFusedActivation activation; +} TfLiteConcatenationParams; + +typedef struct { + TfLiteFusedActivation activation; + // Parameter added for the version 4. 
+ bool pot_scale_int16; +} TfLiteAddParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteSpaceToBatchNDParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteBatchToSpaceNDParams; + +typedef struct { + bool adj_x; + bool adj_y; + // Parameters for BatchMatMul version 4 or above. + // If set to true and the weights are quantized, then non constant inputs + // are quantized at evaluation time with asymmetric quantization. + bool asymmetric_quantize_inputs; +} TfLiteBatchMatMulParams; + +typedef struct { + TfLiteFusedActivation activation; +} TfLiteMulParams; + +typedef struct { + TfLiteFusedActivation activation; + // Parameter added for the version 5. + bool pot_scale_int16; +} TfLiteSubParams; + +typedef struct { + TfLiteFusedActivation activation; +} TfLiteDivParams; + +typedef struct { + TfLiteFusedActivation activation; +} TfLiteL2NormParams; + +typedef struct { + int radius; + float bias; + float alpha; + float beta; +} TfLiteLocalResponseNormParams; + +typedef enum { + kTfLiteLSTMFullKernel = 0, + kTfLiteLSTMBasicKernel +} TfLiteLSTMKernelType; + +typedef struct { + // Parameters for LSTM version 1. + TfLiteFusedActivation activation; + float cell_clip; + float proj_clip; + + // Parameters for LSTM version 2. + // kTfLiteLSTMBasicKernel is only supported in version 2 or above. + TfLiteLSTMKernelType kernel_type; + + // Parameters for LSTM version 4. + bool asymmetric_quantize_inputs; +} TfLiteLSTMParams; + +typedef struct { + // Parameters needed for the underlying LSTM. + TfLiteFusedActivation activation; + float cell_clip; + float proj_clip; + + // If set to true then the first dimension is time, otherwise batch. + bool time_major; + + // Parameter for unidirectional sequence RNN version 3. + bool asymmetric_quantize_inputs; + + // Parameter for unidirectional sequence RNN version 4. 
+ bool diagonal_recurrent_tensors; +} TfLiteUnidirectionalSequenceLSTMParams; + +typedef struct { + // Parameters supported by version 1: + // Parameters inherited for the LSTM kernel. + TfLiteFusedActivation activation; + float cell_clip; + float proj_clip; + + // If true, store the outputs of both directions in the first output. + bool merge_outputs; + + // Parameters supported by version 2: + // If set to true then the first dimension is time, otherwise batch. + bool time_major; + + // Parameters supported by version 3: + // If set to true, then hybrid ops use asymmetric quantization for inputs. + bool asymmetric_quantize_inputs; +} TfLiteBidirectionalSequenceLSTMParams; + +typedef struct { + bool align_corners; + // half_pixel_centers assumes pixels are of half the actual dimensions, and + // yields more accurate resizes. Corresponds to the same argument for the + // original TensorFlow op in TF2.0. + bool half_pixel_centers; +} TfLiteResizeBilinearParams; + +typedef struct { + bool align_corners; + bool half_pixel_centers; +} TfLiteResizeNearestNeighborParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLitePadParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLitePadV2Params; + +typedef struct { + // These fields are only used in old models for backward compatibility. + // In the current implementation, we use the 2nd input of the op as the shape, + // and these fields are unused. 
+ int shape[TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT]; + int num_dimensions; +} TfLiteReshapeParams; + +typedef struct { + int ngram_size; + int max_skip_size; + bool include_all_ngrams; +} TfLiteSkipGramParams; + +typedef struct { + int block_size; +} TfLiteSpaceToDepthParams; + +typedef struct { + int block_size; +} TfLiteDepthToSpaceParams; + +typedef struct { + TfLiteType in_data_type; + TfLiteType out_data_type; +} TfLiteCastParams; + +typedef enum { + kTfLiteCombinerTypeSum = 0, + kTfLiteCombinerTypeMean = 1, + kTfLiteCombinerTypeSqrtn = 2, +} TfLiteCombinerType; + +typedef struct { + TfLiteCombinerType combiner; +} TfLiteEmbeddingLookupSparseParams; + +typedef struct { + int axis; + int batch_dims; +} TfLiteGatherParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteTransposeParams; + +typedef struct { + bool keep_dims; +} TfLiteReducerParams; + +typedef struct { + int num_splits; +} TfLiteSplitParams; + +typedef struct { + int num_splits; +} TfLiteSplitVParams; + +typedef struct { + // TODO(ahentz): We can't have dynamic data in this struct, at least not yet. + // For now we will fix the maximum possible number of dimensions. 
+ int squeeze_dims[8]; + int num_squeeze_dims; +} TfLiteSqueezeParams; + +typedef struct { + int begin_mask; + int end_mask; + int ellipsis_mask; + int new_axis_mask; + int shrink_axis_mask; +} TfLiteStridedSliceParams; + +typedef struct { + TfLiteType output_type; +} TfLiteArgMaxParams; + +typedef struct { + TfLiteType output_type; +} TfLiteArgMinParams; + +typedef struct { + // Parameters supported by version 1: + TfLitePadding padding; + int stride_width; + int stride_height; + + // Parameters supported by version 4: + TfLiteFusedActivation activation; +} TfLiteTransposeConvParams; + +typedef struct { + bool validate_indices; +} TfLiteSparseToDenseParams; + +typedef struct { + TfLiteType out_type; +} TfLiteShapeParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteRankParams; + +typedef struct { + // Parameters supported by version 1: + float min; + float max; + int num_bits; + + // Parameters supported by version 2: + bool narrow_range; +} TfLiteFakeQuantParams; + +typedef struct { + int values_count; + int axis; +} TfLitePackParams; + +typedef struct { + int axis; +} TfLiteOneHotParams; + +typedef struct { + int num; + int axis; +} TfLiteUnpackParams; + +typedef struct { + float alpha; +} TfLiteLeakyReluParams; + +typedef struct { + TfLiteType index_out_type; +} TfLiteUniqueParams; + +typedef struct { + int seq_dim; + int batch_dim; +} TfLiteReverseSequenceParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteMatrixDiagParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteMatrixSetDiagParams; + +typedef struct { + int then_subgraph_index; + int else_subgraph_index; +} TfLiteIfParams; + +typedef struct { + int cond_subgraph_index; + int body_subgraph_index; +} TfLiteWhileParams; + +typedef struct { + bool exclusive; + bool reverse; +} TfLiteCumsumParams; + +typedef struct { + int init_subgraph_index; +} TfLiteCallOnceParams; + +typedef struct { + int table_id; + TfLiteType key_dtype; + TfLiteType 
value_dtype; +} TfLiteHashtableParams; + +typedef struct { + const char* container; + const char* shared_name; +} TfLiteVarHandleParams; + +typedef struct { + int seed; + int seed2; +} TfLiteRandomParams; + +typedef struct { + int num_boundaries; + // This points to the memory stored in the model (flatbuffer), + // and is not owned. + const float* boundaries; +} TfLiteBucketizeParams; + +typedef struct { + bool approximate; +} TfLiteGeluParams; + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif // TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_ diff --git a/include/tensorflow/lite/core/c/c_api.h b/include/tensorflow/lite/core/c/c_api.h new file mode 100644 index 00000000000..a4316b6ae9e --- /dev/null +++ b/include/tensorflow/lite/core/c/c_api.h @@ -0,0 +1,566 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +/// \warning Users of TensorFlow Lite should not include this file directly, +/// but should instead include "third_party/tensorflow/lite/c/c_api.h". +/// Only the TensorFlow Lite implementation itself should include this +/// file directly. 
+#ifndef TENSORFLOW_LITE_CORE_C_C_API_H_
+#define TENSORFLOW_LITE_CORE_C_C_API_H_
+
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "tensorflow/lite/builtin_ops.h"
+#include "tensorflow/lite/core/async/c/types.h"
+#include "tensorflow/lite/core/c/c_api_types.h"  // IWYU pragma: export
+
+// --------------------------------------------------------------------------
+/// \file
+/// C API for TensorFlow Lite.
+///
+/// The API leans towards simplicity and uniformity instead of convenience, as
+/// most usage will be by language-specific wrappers. It provides largely the
+/// same set of functionality as that of the C++ TensorFlow Lite `Interpreter`
+/// API, but is useful for shared libraries where having a stable ABI boundary
+/// is important.
+///
+/// Conventions:
+/// * We use the prefix TfLite for everything in the API.
+/// * size_t is used to represent byte sizes of objects that are
+///   materialized in the address space of the calling process.
+/// * int is used as an index into arrays.
+///
+/// Usage:
+///
+/// <pre><code>
+/// // Create the model and interpreter options.
+/// TfLiteModel* model = TfLiteModelCreateFromFile("/path/to/model.tflite");
+/// TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
+/// TfLiteInterpreterOptionsSetNumThreads(options, 2);
+///
+/// // Create the interpreter.
+/// TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
+///
+/// // Allocate tensors and populate the input tensor data.
+/// TfLiteInterpreterAllocateTensors(interpreter);
+/// TfLiteTensor* input_tensor =
+///     TfLiteInterpreterGetInputTensor(interpreter, 0);
+/// TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
+///                            input.size() * sizeof(float));
+///
+/// // Execute inference.
+/// TfLiteInterpreterInvoke(interpreter);
+///
+/// // Extract the output tensor data.
+/// const TfLiteTensor* output_tensor =
+///      TfLiteInterpreterGetOutputTensor(interpreter, 0);
+/// TfLiteTensorCopyToBuffer(output_tensor, output.data(),
+///                          output.size() * sizeof(float));
+///
+/// // Dispose of the model and interpreter objects.
+/// TfLiteInterpreterDelete(interpreter);
+/// TfLiteInterpreterOptionsDelete(options);
+/// TfLiteModelDelete(model);
+///
+/// </code></pre>
+ +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// This header should be valid in both C (e.g. C99) and C++, +// so 'void' in parameters is not redundant. +// NOLINTBEGIN(modernize-redundant-void-arg) + +// -------------------------------------------------------------------------- +// Opaque types used by the C API. (See also c_api_types.h.) + +/// TfLiteModel wraps a loaded TensorFlow Lite model. +typedef struct TfLiteModel TfLiteModel; + +/// TfLiteInterpreterOptions allows customized interpreter configuration. +typedef struct TfLiteInterpreterOptions TfLiteInterpreterOptions; + +/// TfLiteInterpreter provides inference from a provided model. +typedef struct TfLiteInterpreter TfLiteInterpreter; + +/// A tensor in the interpreter system which is a wrapper around a buffer of +/// data including a dimensionality (or NULL if not currently defined). +typedef struct TfLiteTensor TfLiteTensor; + +/// TfLiteRegistrationExternal is an external version of TfLiteRegistration to +/// use custom op registration API. +/// \warning This is an experimental type and subject to change. +typedef struct TfLiteRegistrationExternal TfLiteRegistrationExternal; + +// -------------------------------------------------------------------------- +/// The TensorFlow Lite Runtime version. +/// +/// Returns a pointer to a statically allocated string that is the version +/// number of the (potentially dynamically loaded) TF Lite Runtime library. +/// TensorFlow Lite uses semantic versioning, and the return value should be +/// in semver 2 format , starting with MAJOR.MINOR.PATCH, +/// e.g. "2.12.0" or "2.13.0-rc2". +TFL_CAPI_EXPORT extern const char* TfLiteVersion(void); + +/// The supported TensorFlow Lite model file Schema version. +/// +/// Returns the (major) version number of the Schema used for model +/// files that is supported by the (potentially dynamically loaded) +/// TensorFlow Lite Runtime. 
+/// +/// Model files using schema versions different to this may not be supported by +/// the current version of the TF Lite Runtime. +TFL_CAPI_EXPORT int TfLiteSchemaVersion(void); + +/// Returns a model from the provided buffer, or null on failure. +/// +/// \note The caller retains ownership of the `model_data` buffer and should +/// ensure that the lifetime of the `model_data` buffer must be at least as long +/// as the lifetime of the `TfLiteModel` and of any `TfLiteInterpreter` objects +/// created from that `TfLiteModel`, and furthermore the contents of the +/// `model_data` buffer must not be modified during that time." +TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreate(const void* model_data, + size_t model_size); + +/// Same as `TfLiteModelCreate` with customizble error reporter. +/// * `reporter` takes the provided `user_data` object, as well as a C-style +/// format string and arg list (see also vprintf). +/// * `user_data` is optional. If non-null, it is owned by the client and must +/// remain valid for the duration of the interpreter lifetime. +TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateWithErrorReporter( + const void* model_data, size_t model_size, + void (*reporter)(void* user_data, const char* format, va_list args), + void* user_data); + +/// Returns a model from the provided file, or null on failure. +/// +/// \note The file's contents must not be modified during the lifetime of the +/// `TfLiteModel` or of any `TfLiteInterpreter` objects created from that +/// `TfLiteModel`. +TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateFromFile( + const char* model_path); + +/// Same as `TfLiteModelCreateFromFile` with customizble error reporter. +/// * `reporter` takes the provided `user_data` object, as well as a C-style +/// format string and arg list (see also vprintf). +/// * `user_data` is optional. If non-null, it is owned by the client and must +/// remain valid for the duration of the interpreter lifetime. 
+TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateFromFileWithErrorReporter( + const char* model_path, + void (*reporter)(void* user_data, const char* format, va_list args), + void* user_data); + +/// Destroys the model instance. +TFL_CAPI_EXPORT extern void TfLiteModelDelete(TfLiteModel* model); + +/// Returns a new interpreter options instances. +TFL_CAPI_EXPORT extern TfLiteInterpreterOptions* +TfLiteInterpreterOptionsCreate(); + +/// Creates and returns a shallow copy of an options object. +/// +/// The caller is responsible for calling `TfLiteInterpreterOptionsDelete` to +/// deallocate the object pointed to by the returned pointer. +TFL_CAPI_EXPORT extern TfLiteInterpreterOptions* TfLiteInterpreterOptionsCopy( + const TfLiteInterpreterOptions* from); + +/// Destroys the interpreter options instance. +TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsDelete( + TfLiteInterpreterOptions* options); + +/// Sets the number of CPU threads to use for the interpreter. +TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetNumThreads( + TfLiteInterpreterOptions* options, int32_t num_threads); + +/// Adds a delegate to be applied during `TfLiteInterpreter` creation. +/// +/// If delegate application fails, interpreter creation will also fail with an +/// associated error logged. +/// +/// \note The caller retains ownership of the delegate and should ensure that it +/// remains valid for the duration of any created interpreter's lifetime. +/// +/// If you are NOT using "TensorFlow Lite in Play Services", and NOT building +/// with `TFLITE_WITH_STABLE_ABI` or `TFLITE_USE_OPAQUE_DELEGATE` macros +/// enabled, it is possible to pass a `TfLiteDelegate*` rather than a +/// `TfLiteOpaqueDelegate*` to this function, since in those cases, +/// `TfLiteOpaqueDelegate` is just a typedef alias for `TfLiteDelegate`. +/// This is for compatibility with existing source code +/// and existing delegates. 
For new delegates, it is recommended to +/// use `TfLiteOpaqueDelegate` rather than `TfLiteDelegate`. (See +/// `TfLiteOpaqueDelegate` in tensorflow/lite/core/c/c_api_types.h.) +TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsAddDelegate( + TfLiteInterpreterOptions* options, TfLiteOpaqueDelegate* delegate); + +/// Sets a custom error reporter for interpreter execution. +/// +/// * `reporter` takes the provided `user_data` object, as well as a C-style +/// format string and arg list (see also vprintf). +/// * `user_data` is optional. If non-null, it is owned by the client and must +/// remain valid for the duration of the interpreter lifetime. +TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetErrorReporter( + TfLiteInterpreterOptions* options, + void (*reporter)(void* user_data, const char* format, va_list args), + void* user_data); + +/// Adds an op registration to be applied during `TfLiteInterpreter` creation. +/// +/// The `TfLiteRegistrationExternal` object is needed to implement custom op of +/// TFLite Interpreter via C API. Calling this function ensures that any +/// `TfLiteInterpreter` created with the specified `options` can execute models +/// that use the custom operator specified in `registration`. +/// Please refer https://www.tensorflow.org/lite/guide/ops_custom for custom op +/// support. +/// \note The caller retains ownership of the TfLiteRegistrationExternal object +/// and should ensure that it remains valid for the duration of any created +/// interpreter's lifetime. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsAddRegistrationExternal( + TfLiteInterpreterOptions* options, + TfLiteRegistrationExternal* registration); + +/// Enables users to cancel in-flight invocations with +/// `TfLiteInterpreterCancel`. +/// +/// By default it is disabled and calling to `TfLiteInterpreterCancel` will +/// return kTfLiteError. See `TfLiteInterpreterCancel`. 
+/// +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterOptionsEnableCancellation( + TfLiteInterpreterOptions* options, bool enable); + +/// Returns a new interpreter using the provided model and options, or null on +/// failure. +/// +/// * `model` must be a valid model instance. The caller retains ownership of +/// the object, and may destroy it (via TfLiteModelDelete) immediately after +/// creating the interpreter. However, if the TfLiteModel was allocated with +/// TfLiteModelCreate, then the `model_data` buffer that was passed to +/// TfLiteModelCreate must outlive the lifetime of the TfLiteInterpreter +/// object that this function returns, and must not be modified during that +/// time; and if the TfLiteModel was allocated with TfLiteModelCreateFromFile, +/// then the contents of the model file must not be modified during the +/// lifetime of the TfLiteInterpreter object that this function returns. +/// * `optional_options` may be null. The caller retains ownership of the +/// object, and can safely destroy it (via TfLiteInterpreterOptionsDelete) +/// immediately after creating the interpreter. +/// +/// \note The client *must* explicitly allocate tensors before attempting to +/// access input tensor data or invoke the interpreter. +TFL_CAPI_EXPORT extern TfLiteInterpreter* TfLiteInterpreterCreate( + const TfLiteModel* model, const TfLiteInterpreterOptions* optional_options); + +/// Destroys the interpreter. +TFL_CAPI_EXPORT extern void TfLiteInterpreterDelete( + TfLiteInterpreter* interpreter); + +/// Returns the number of input tensors associated with the model. +TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetInputTensorCount( + const TfLiteInterpreter* interpreter); + +/// Returns a pointer to an array of input tensor indices. The length of the +/// array can be obtained via a call to `TfLiteInterpreterGetInputTensorCount`. 
+/// +/// Typically the input tensors associated with an `interpreter` would be set +/// during the initialization of the `interpreter`, through a mechanism like the +/// `InterpreterBuilder`, and remain unchanged throughout the lifetime of the +/// interpreter. However, there are some circumstances in which the pointer may +/// not remain valid throughout the lifetime of the interpreter, because calls +/// to `SetInputs` on the interpreter invalidate the returned pointer. +/// +/// The ownership of the array remains with the TFLite runtime. +TFL_CAPI_EXPORT const int* TfLiteInterpreterInputTensorIndices( + const TfLiteInterpreter* interpreter); + +/// Returns the tensor associated with the input index. +/// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(tensor) +TFL_CAPI_EXPORT extern TfLiteTensor* TfLiteInterpreterGetInputTensor( + const TfLiteInterpreter* interpreter, int32_t input_index); + +/// Resizes the specified input tensor. +/// +/// \note After a resize, the client *must* explicitly allocate tensors before +/// attempting to access the resized tensor data or invoke the interpreter. +/// +/// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(tensor) +/// +/// This function makes a copy of the input dimensions, so the client can safely +/// deallocate `input_dims` immediately after this function returns. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterResizeInputTensor( + TfLiteInterpreter* interpreter, int32_t input_index, const int* input_dims, + int32_t input_dims_size); + +/// Updates allocations for all tensors, resizing dependent tensors using the +/// specified input tensor dimensionality. +/// +/// This is a relatively expensive operation, and need only be called after +/// creating the graph and/or resizing any inputs. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterAllocateTensors( + TfLiteInterpreter* interpreter); + +/// Runs inference for the loaded graph. 
+/// +/// Before calling this function, the caller should first invoke +/// TfLiteInterpreterAllocateTensors() and should also set the values for the +/// input tensors. After successfully calling this function, the values for the +/// output tensors will be set. +/// +/// \note It is possible that the interpreter is not in a ready state to +/// evaluate (e.g., if AllocateTensors() hasn't been called, or if a +/// ResizeInputTensor() has been performed without a subsequent call to +/// AllocateTensors()). +/// +/// If the (experimental!) delegate fallback option was enabled in the +/// interpreter options, then the interpreter will automatically fall back to +/// not using any delegates if execution with delegates fails. For details, +/// see TfLiteInterpreterOptionsSetEnableDelegateFallback in +/// c_api_experimental.h. +/// +/// Returns one of the following status codes: +/// - kTfLiteOk: Success. Output is valid. +/// - kTfLiteDelegateError: Execution with delegates failed, due to a problem +/// with the delegate(s). If fallback was not enabled, output is invalid. +/// If fallback was enabled, this return value indicates that fallback +/// succeeded, the output is valid, and all delegates previously applied to +/// the interpreter have been undone. +/// - kTfLiteApplicationError: Same as for kTfLiteDelegateError, except that +/// the problem was not with the delegate itself, but rather was +/// due to an incompatibility between the delegate(s) and the +/// interpreter or model. +/// - kTfLiteError: Unexpected/runtime failure. Output is invalid. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterInvoke( + TfLiteInterpreter* interpreter); + +/// Returns the number of output tensors associated with the model. +TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetOutputTensorCount( + const TfLiteInterpreter* interpreter); + +/// Returns a pointer to an array of output tensor indices. 
The length of the +/// array can be obtained via a call to `TfLiteInterpreterGetOutputTensorCount`. +/// +/// Typically the output tensors associated with an `interpreter` would be set +/// during the initialization of the `interpreter`, through a mechanism like the +/// `InterpreterBuilder`, and remain unchanged throughout the lifetime of the +/// interpreter. However, there are some circumstances in which the pointer may +/// not remain valid throughout the lifetime of the interpreter, because calls +/// to `SetOutputs` on the interpreter invalidate the returned pointer. +/// +/// The ownership of the array remains with the TFLite runtime. +TFL_CAPI_EXPORT const int* TfLiteInterpreterOutputTensorIndices( + const TfLiteInterpreter* interpreter); + +/// Returns the tensor associated with the output index. +/// REQUIRES: 0 <= output_index < TfLiteInterpreterGetOutputTensorCount(tensor) +/// +/// \note The shape and underlying data buffer for output tensors may be not +/// be available until after the output tensor has been both sized and +/// allocated. +/// In general, best practice is to interact with the output tensor *after* +/// calling TfLiteInterpreterInvoke(). +TFL_CAPI_EXPORT extern const TfLiteTensor* TfLiteInterpreterGetOutputTensor( + const TfLiteInterpreter* interpreter, int32_t output_index); + +/// Returns modifiable access to the tensor that corresponds to the +/// specified `index` and is associated with the provided `interpreter`. +/// +/// This requires the `index` to be between 0 and N - 1, where N is the +/// number of tensors in the model. +/// +/// Typically the tensors associated with the `interpreter` would be set during +/// the `interpreter` initialization, through a mechanism like the +/// `InterpreterBuilder`, and remain unchanged throughout the lifetime of the +/// interpreter. 
However, there are some circumstances in which the pointer may +/// not remain valid throughout the lifetime of the interpreter, because calls +/// to `AddTensors` on the interpreter invalidate the returned pointer. +/// +/// Note the difference between this function and +/// `TfLiteInterpreterGetInputTensor` (or `TfLiteInterpreterGetOutputTensor` for +/// that matter): `TfLiteInterpreterGetTensor` takes an index into the array of +/// all tensors associated with the `interpreter`'s model, whereas +/// `TfLiteInterpreterGetInputTensor` takes an index into the array of input +/// tensors. +/// +/// The ownership of the tensor remains with the TFLite runtime, meaning the +/// caller should not deallocate the pointer. +TFL_CAPI_EXPORT +TfLiteTensor* TfLiteInterpreterGetTensor(const TfLiteInterpreter* interpreter, + int index); + +/// Tries to cancel any in-flight invocation. +/// +/// \note This only cancels `TfLiteInterpreterInvoke` calls that happen before +/// calling this and it does not cancel subsequent invocations. +/// \note Calling this function will also cancel any in-flight invocations of +/// SignatureRunners constructed from this interpreter. +/// Non-blocking and thread safe. +/// +/// Returns kTfLiteError if cancellation is not enabled via +/// `TfLiteInterpreterOptionsEnableCancellation`. +/// +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterCancel( + const TfLiteInterpreter* interpreter); + +// -------------------------------------------------------------------------- +// TfLiteTensor wraps data associated with a graph tensor. +// +// Note that, while the TfLiteTensor struct is not currently opaque, and its +// fields can be accessed directly, these methods are still convenient for +// language bindings. In the future the tensor struct will likely be made opaque +// in the public API. + +/// Returns the type of a tensor element. 
+TFL_CAPI_EXPORT extern TfLiteType TfLiteTensorType(const TfLiteTensor* tensor); + +/// Returns the number of dimensions that the tensor has. Returns -1 in case +/// the 'opaque_tensor' does not have its dimensions property set. +TFL_CAPI_EXPORT extern int32_t TfLiteTensorNumDims(const TfLiteTensor* tensor); + +/// Returns the length of the tensor in the "dim_index" dimension. +/// REQUIRES: 0 <= dim_index < TFLiteTensorNumDims(tensor) +TFL_CAPI_EXPORT extern int32_t TfLiteTensorDim(const TfLiteTensor* tensor, + int32_t dim_index); + +/// Returns the size of the underlying data in bytes. +TFL_CAPI_EXPORT extern size_t TfLiteTensorByteSize(const TfLiteTensor* tensor); + +/// Returns a pointer to the underlying data buffer. +/// +/// \note The result may be null if tensors have not yet been allocated, e.g., +/// if the Tensor has just been created or resized and `TfLiteAllocateTensors()` +/// has yet to be called, or if the output tensor is dynamically sized and the +/// interpreter hasn't been invoked. +TFL_CAPI_EXPORT extern void* TfLiteTensorData(const TfLiteTensor* tensor); + +/// Returns the (null-terminated) name of the tensor. +TFL_CAPI_EXPORT extern const char* TfLiteTensorName(const TfLiteTensor* tensor); + +/// Returns the parameters for asymmetric quantization. The quantization +/// parameters are only valid when the tensor type is `kTfLiteUInt8` and the +/// `scale != 0`. Quantized values can be converted back to float using: +/// real_value = scale * (quantized_value - zero_point); +TFL_CAPI_EXPORT extern TfLiteQuantizationParams TfLiteTensorQuantizationParams( + const TfLiteTensor* tensor); + +/// Copies from the provided input buffer into the tensor's buffer. +/// REQUIRES: input_data_size == TfLiteTensorByteSize(tensor) +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteTensorCopyFromBuffer( + TfLiteTensor* tensor, const void* input_data, size_t input_data_size); + +/// Copies to the provided output buffer from the tensor's buffer. 
+/// REQUIRES: output_data_size == TfLiteTensorByteSize(tensor) +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteTensorCopyToBuffer( + const TfLiteTensor* output_tensor, void* output_data, + size_t output_data_size); + +/// Returns a new TfLiteRegistrationExternal instance. +/// +/// \note The caller retains ownership and should ensure that +/// the lifetime of the `TfLiteRegistrationExternal` must be at least as long as +/// the lifetime of the `TfLiteInterpreter`. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteRegistrationExternal* +TfLiteRegistrationExternalCreate(TfLiteBuiltinOperator builtin_code, + const char* custom_name, int version); + +/// Return the builtin op code of the provided external 'registration'. +/// +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteBuiltinOperator +TfLiteRegistrationExternalGetBuiltInCode( + const TfLiteRegistrationExternal* registration); + +/// Return the OP version of the provided external 'registration'. Return -1 +/// in case of error, or if the provided address is null. +/// +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern int TfLiteRegistrationExternalGetVersion( + const TfLiteRegistrationExternal* registration); + +/// Returns the custom name of the provided 'registration'. The returned pointer +/// will be non-null iff the op is a custom op. +/// +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern const char* TfLiteRegistrationExternalGetCustomName( + const TfLiteRegistrationExternal* registration); + +/// Destroys the TfLiteRegistrationExternal instance. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalDelete( + TfLiteRegistrationExternal* registration); + +/// Sets the initialization callback for the registration. 
+/// +/// The callback is called to initialize the op from serialized data. +/// Please refer `init` of `TfLiteRegistration` for the detail. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetInit( + TfLiteRegistrationExternal* registration, + void* (*init)(TfLiteOpaqueContext* context, const char* buffer, + size_t length)); + +/// Sets the deallocation callback for the registration. +/// +/// This callback is called to deallocate the data returned by the init +/// callback. The value passed in the `data` parameter is the value that was +/// returned by the `init` callback. +/// Please refer `free` of `TfLiteRegistration` for the detail. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetFree( + TfLiteRegistrationExternal* registration, + void (*free)(TfLiteOpaqueContext* context, void* data)); + +/// Sets the preparation callback for the registration. +/// +/// The callback is called when the inputs of operator have been resized. +/// Please refer `prepare` of `TfLiteRegistration` for the detail. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetPrepare( + TfLiteRegistrationExternal* registration, + TfLiteStatus (*prepare)(TfLiteOpaqueContext* context, + TfLiteOpaqueNode* node)); + +/// Sets the invocation callback for the registration. +/// +/// The callback is called when the operator is executed. +/// Please refer `invoke` of `TfLiteRegistration` for the detail. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetInvoke( + TfLiteRegistrationExternal* registration, + TfLiteStatus (*invoke)(TfLiteOpaqueContext* context, + TfLiteOpaqueNode* node)); + +/// Sets the async kernel accessor callback for the registration. 
+/// +/// The callback is called to retrieve the async kernel if the delegate supports +/// it. If the delegate does not support async execution, either this function +/// should not be called, or `async_kernel` needs to be nullptr. +/// `node` is the delegate TfLiteNode created by `ModifyGraphWithDelegate`. +/// Please refer `async_kernel` of `TfLiteRegistration` for the detail. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetAsyncKernel( + TfLiteRegistrationExternal* registration, + TfLiteAsyncKernel* (*async_kernel)(TfLiteOpaqueContext* context, + TfLiteOpaqueNode* node)); + +// NOLINTEND(modernize-redundant-void-arg) + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif // TENSORFLOW_LITE_CORE_C_C_API_H_ diff --git a/include/tensorflow/lite/core/c/c_api_experimental.h b/include/tensorflow/lite/core/c/c_api_experimental.h new file mode 100644 index 00000000000..f203f8a4198 --- /dev/null +++ b/include/tensorflow/lite/core/c/c_api_experimental.h @@ -0,0 +1,468 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +/// WARNING: Users of TensorFlow Lite should not include this file directly, +/// but should instead include +/// "third_party/tensorflow/lite/c/c_api_experimental.h". 
+/// Only the TensorFlow Lite implementation itself should include this +/// file directly. +#ifndef TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ +#define TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ + +#include "tensorflow/lite/builtin_ops.h" +#include "tensorflow/lite/core/c/c_api.h" +#include "tensorflow/lite/core/c/common.h" + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// -------------------------------------------------------------------------- +// Opaque types used by the C API. + +/// TfLiteSignatureRunner is used to run inference on a signature. +/// +/// Note: A signature is used to define a computation in a TF model. A model can +/// have multiple signatures. Each signature contains three components: +/// * Signature Key: A unique string to identify a signature +/// * Inputs: A list of names, each mapped to an input tensor of a signature +/// * Outputs: A list of names, each mapped to an output tensor of a signature +/// +/// To learn more about signatures in TFLite, refer to: +/// https://www.tensorflow.org/lite/guide/signatures +/// +/// Using the TfLiteSignatureRunner, for a particular signature, you can set its +/// inputs, invoke (i.e. execute) the computation, and retrieve its outputs. +typedef struct TfLiteSignatureRunner TfLiteSignatureRunner; + +// -------------------------------------------------------------------------- +/// Resets all variable tensors to zero. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterResetVariableTensors( + TfLiteInterpreter* interpreter); + +/// Adds an op registration for a builtin operator. +/// +/// Op registrations are used to map ops referenced in the flatbuffer model +/// to executable function pointers (`TfLiteRegistration`s). +/// +/// NOTE: The interpreter will make a shallow copy of `registration` internally, +/// so the caller should ensure that its contents (function pointers, etc...) 
+/// remain valid for the duration of the interpreter's lifetime. A common +/// practice is making the provided `TfLiteRegistration` instance static. +/// +/// Code that uses this function should NOT call +/// `TfLiteInterpreterOptionsSetOpResolver` (or related functions) on the same +/// options object. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT void TfLiteInterpreterOptionsAddBuiltinOp( + TfLiteInterpreterOptions* options, TfLiteBuiltinOperator op, + const TfLiteRegistration* registration, int32_t min_version, + int32_t max_version); + +/// Adds an op registration for a custom operator. +/// +/// Op registrations are used to map ops referenced in the flatbuffer model +/// to executable function pointers (`TfLiteRegistration`s). +/// +/// NOTE: The interpreter will make a shallow copy of `registration` internally, +/// so the caller should ensure that its contents (function pointers, etc...) +/// remain valid for the duration of any created interpreter's lifetime. A +/// common practice is making the provided `TfLiteRegistration` instance static. +/// +/// The lifetime of the string pointed to by `name` must be at least as long +/// as the lifetime of the `TfLiteInterpreterOptions`. +/// +/// Code that uses this function should NOT call +/// `TfLiteInterpreterOptionsSetOpResolver` (or related functions) on the same +/// options object. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT void TfLiteInterpreterOptionsAddCustomOp( + TfLiteInterpreterOptions* options, const char* name, + const TfLiteRegistration* registration, int32_t min_version, + int32_t max_version); + +/// Registers callbacks for resolving builtin or custom operators. +/// +/// The `TfLiteInterpreterOptionsSetOpResolverExternal` function provides an +/// alternative method for registering builtin ops and/or custom ops, by +/// providing operator resolver callbacks. 
Unlike using
+/// `TfLiteInterpreterOptionsAddBuiltinOp` and/or
+/// `TfLiteInterpreterOptionsAddCustomOp`, these let you register all the
+/// operators in a single call.
+///
+/// Code that uses this function should NOT call
+/// `TfLiteInterpreterOptionsAddBuiltinOp` or
+/// `TfLiteInterpreterOptionsAddCustomOp` on the same options object.
+///
+/// If `op_resolver_user_data` is non-null, its lifetime must be at least as
+/// long as the lifetime of the `TfLiteInterpreterOptions`.
+///
+/// WARNING: This is an experimental API and subject to change.
+void TfLiteInterpreterOptionsSetOpResolverExternal(
+    TfLiteInterpreterOptions* options,
+    const TfLiteRegistrationExternal* (*find_builtin_op)(void* user_data,
+                                                         int op, int version),
+    const TfLiteRegistrationExternal* (*find_custom_op)(void* user_data,
+                                                        const char* custom_op,
+                                                        int version),
+    void* op_resolver_user_data);
+
+/// Registers callbacks for resolving builtin or custom operators.
+///
+/// The `TfLiteInterpreterOptionsSetOpResolver` function provides an alternative
+/// method for registering builtin ops and/or custom ops, by providing operator
+/// resolver callbacks. Unlike using `TfLiteInterpreterOptionsAddBuiltinOp`
+/// and/or `TfLiteInterpreterOptionsAddCustomOp`, these let you register all
+/// the operators in a single call.
+///
+/// Code that uses this function should NOT call
+/// `TfLiteInterpreterOptionsAddBuiltinOp` or
+/// `TfLiteInterpreterOptionsAddCustomOp` on the same options object.
+///
+/// If `op_resolver_user_data` is non-null, its lifetime must be at least as
+/// long as the lifetime of the `TfLiteInterpreterOptions`.
+///
+/// WARNING: This is an experimental API and subject to change.
+///
+/// DEPRECATED: use TfLiteInterpreterOptionsSetOpResolverExternal instead. 
+void TfLiteInterpreterOptionsSetOpResolver( + TfLiteInterpreterOptions* options, + const TfLiteRegistration* (*find_builtin_op)(void* user_data, + TfLiteBuiltinOperator op, + int version), + const TfLiteRegistration* (*find_custom_op)(void* user_data, + const char* custom_op, + int version), + void* op_resolver_user_data); + +/// \private +/// Backward-compat version of TfLiteInterpreterOptionsSetOpResolver. +/// +/// WARNING: This function is deprecated / not an official part of the API, is +/// only for binary backwards compatibility, and should not be called. +void TfLiteInterpreterOptionsSetOpResolverV2( + TfLiteInterpreterOptions* options, + const TfLiteRegistration_V2* (*find_builtin_op_v2)(void* user_data, + TfLiteBuiltinOperator op, + int version), + const TfLiteRegistration_V2* (*find_custom_op_v2)(void* user_data, + const char* op, + int version), + void* op_resolver_user_data); + +/// \private +/// Backward-compat version of TfLiteInterpreterOptionsSetOpResolver. +/// +/// WARNING: This function is deprecated / not an official part of the API, is +/// only for binary backwards compatibility, and should not be called. +void TfLiteInterpreterOptionsSetOpResolverV1( + TfLiteInterpreterOptions* options, + const TfLiteRegistration_V1* (*find_builtin_op_v1)(void* user_data, + TfLiteBuiltinOperator op, + int version), + const TfLiteRegistration_V1* (*find_custom_op_v1)(void* user_data, + const char* op, + int version), + void* op_resolver_user_data); + +/// Returns a new interpreter using the provided model and options, or null on +/// failure, where the model uses only the operators explicitly added to the +/// options. This is the same as `TFLiteInterpreterCreate` from `c_api.h`, +/// except that the only operators that are supported are the ones registered +/// in `options` via calls to `TfLiteInterpreterOptionsSetOpResolver`, +/// `TfLiteInterpreterOptionsAddBuiltinOp`, and/or +/// `TfLiteInterpreterOptionsAddCustomOp`. 
+/// +/// * `model` must be a valid model instance. The caller retains ownership of +/// the object, and can destroy it immediately after creating the interpreter; +/// the interpreter will maintain its own reference to the underlying model +/// data. +/// * `options` should not be null. The caller retains ownership of the object, +/// and can safely destroy it immediately after creating the interpreter. +/// +/// NOTE: The client *must* explicitly allocate tensors before attempting to +/// access input tensor data or invoke the interpreter. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteInterpreter* +TfLiteInterpreterCreateWithSelectedOps(const TfLiteModel* model, + const TfLiteInterpreterOptions* options); + +/// Enable or disable the NN API delegate for the interpreter (true to enable). +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetUseNNAPI( + TfLiteInterpreterOptions* options, bool enable); + +/// Enable or disable CPU fallback for the interpreter (true to enable). +/// If enabled, TfLiteInterpreterInvoke will do automatic fallback from +/// executing with delegate(s) to regular execution without delegates +/// (i.e. on CPU). +/// +/// Allowing the fallback is suitable only if both of the following hold: +/// - The caller is known not to cache pointers to tensor data across +/// TfLiteInterpreterInvoke calls. +/// - The model is not stateful (no variables, no LSTMs) or the state isn't +/// needed between batches. +/// +/// When delegate fallback is enabled, TfLiteInterpreterInvoke will +/// behave as follows: +/// If one or more delegates were set in the interpreter options +/// (see TfLiteInterpreterOptionsAddDelegate), +/// AND inference fails, +/// then the interpreter will fall back to not using any delegates. 
+/// In that case, the previously applied delegate(s) will be automatically
+/// undone, and an attempt will be made to return the interpreter to an
+/// invokable state, which may invalidate previous tensor addresses,
+/// and the inference will be attempted again, using input tensors with
+/// the same value as previously set.
+///
+/// WARNING: This is an experimental API and subject to change.
+TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetEnableDelegateFallback(
+    TfLiteInterpreterOptions* options, bool enable);
+
+/// Sets whether buffer handle output is allowed.
+///
+/// When using hardware delegation, Interpreter will make the data of output
+/// tensors available in `tensor->data` by default. If the application can
+/// consume the buffer handle directly (e.g. reading output from OpenGL
+/// texture), it can set this flag to false, so Interpreter won't copy the
+/// data from buffer handle to CPU memory. WARNING: This is an experimental
+/// API and subject to change.
+TFL_CAPI_EXPORT extern void TfLiteSetAllowBufferHandleOutput(
+    const TfLiteInterpreter* interpreter, bool allow_buffer_handle_output);
+
+/// Allow a delegate to look at the graph and modify the graph to handle
+/// parts of the graph themselves. After this is called, the graph may
+/// contain new nodes that replace 1 or more nodes.
+/// 'delegate' must outlive the interpreter.
+/// Use `TfLiteInterpreterOptionsAddDelegate` instead of this unless
+/// absolutely required.
+/// Returns one of the following three status codes:
+/// 1. kTfLiteOk: Success.
+/// 2. kTfLiteDelegateError: Delegation failed due to an error in the
+///    delegate. The Interpreter has been restored to its pre-delegation state.
+///    NOTE: This undoes all delegates previously applied to the Interpreter.
+/// 3. kTfLiteError: Unexpected/runtime failure.
+/// WARNING: This is an experimental API and subject to change. 
+TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterModifyGraphWithDelegate( + const TfLiteInterpreter* interpreter, TfLiteDelegate* delegate); + +/// Returns the tensor index corresponding to the input tensor +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetInputTensorIndex( + const TfLiteInterpreter* interpreter, int32_t input_index); + +/// Returns the tensor index corresponding to the output tensor +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetOutputTensorIndex( + const TfLiteInterpreter* interpreter, int32_t output_index); + +/// -------------------------------------------------------------------------- +/// SignatureRunner APIs +/// +/// You can run inference by either: +/// +/// (i) (recommended) using the Interpreter to initialize SignatureRunner(s) and +/// then only using SignatureRunner APIs. +/// +/// (ii) only using Interpreter APIs. +/// +/// NOTE: +/// * Only use one of the above options to run inference, i.e. avoid mixing both +/// SignatureRunner APIs and Interpreter APIs to run inference as they share +/// the same underlying data (e.g. updating an input tensor “A” retrieved +/// using the Interpreter APIs will update the state of the input tensor “B” +/// retrieved using SignatureRunner APIs, if they point to the same underlying +/// tensor in the model; as it is not possible for a user to debug this by +/// analyzing the code, it can lead to undesirable behavior). +/// * The TfLiteSignatureRunner type is conditionally thread-safe, provided that +/// no two threads attempt to simultaneously access two TfLiteSignatureRunner +/// instances that point to the same underlying signature, or access a +/// TfLiteSignatureRunner and its underlying TfLiteInterpreter, unless all +/// such simultaneous accesses are reads (rather than writes). 
+/// * The lifetime of a TfLiteSignatureRunner object ends when +/// TfLiteSignatureRunnerDelete() is called on it (or when the lifetime of the +/// underlying TfLiteInterpreter ends -- but you should call +/// TfLiteSignatureRunnerDelete() before that happens in order to avoid +/// resource leaks). +/// * You can only apply delegates to the interpreter (via +/// TfLiteInterpreterOptions) and not to a signature. + +/// Returns the number of signatures defined in the model. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetSignatureCount( + const TfLiteInterpreter* interpreter); + +/// Returns the key of the Nth signature in the model, where N is specified as +/// `signature_index`. +/// +/// NOTE: The lifetime of the returned key is the same as (and depends on) the +/// lifetime of `interpreter`. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern const char* TfLiteInterpreterGetSignatureKey( + const TfLiteInterpreter* interpreter, int32_t signature_index); + +/// Returns a new signature runner using the provided interpreter and signature +/// key, or nullptr on failure. +/// +/// NOTE: `signature_key` is a null-terminated C string that must match the +/// key of a signature in the interpreter's model. +/// +/// NOTE: The returned signature runner should be destroyed, by calling +/// TfLiteSignatureRunnerDelete(), before the interpreter is destroyed. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteSignatureRunner* +TfLiteInterpreterGetSignatureRunner(const TfLiteInterpreter* interpreter, + const char* signature_key); + +/// Returns the number of inputs associated with a signature. +/// +/// WARNING: This is an experimental API and subject to change. 
+TFL_CAPI_EXPORT extern size_t TfLiteSignatureRunnerGetInputCount( + const TfLiteSignatureRunner* signature_runner); + +/// Returns the (null-terminated) name of the Nth input in a signature, where N +/// is specified as `input_index`. +/// +/// NOTE: The lifetime of the returned name is the same as (and depends on) the +/// lifetime of `signature_runner`. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern const char* TfLiteSignatureRunnerGetInputName( + const TfLiteSignatureRunner* signature_runner, const int32_t input_index); + +/// Resizes the input tensor identified as `input_name` to be the dimensions +/// specified by `input_dims` and `input_dims_size`. Only unknown dimensions can +/// be resized with this function. Unknown dimensions are indicated as `-1` in +/// the `dims_signature` attribute of a TfLiteTensor. +/// +/// Returns status of failure or success. Note that this doesn't actually resize +/// any existing buffers. A call to TfLiteSignatureRunnerAllocateTensors() is +/// required to change the tensor input buffer. +/// +/// NOTE: This function is similar to TfLiteInterpreterResizeInputTensorStrict() +/// and not TfLiteInterpreterResizeInputTensor(). +/// +/// NOTE: `input_name` must match the name of an input in the signature. +/// +/// NOTE: This function makes a copy of the input dimensions, so the caller can +/// safely deallocate `input_dims` immediately after this function returns. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteSignatureRunnerResizeInputTensor( + TfLiteSignatureRunner* signature_runner, const char* input_name, + const int* input_dims, int32_t input_dims_size); + +/// Updates allocations for tensors associated with a signature and resizes +/// dependent tensors using the specified input tensor dimensionality. 
+/// This is a relatively expensive operation and hence should only be called +/// after initializing the signature runner object and/or resizing any inputs. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteSignatureRunnerAllocateTensors( + TfLiteSignatureRunner* signature_runner); + +/// Returns the input tensor identified by `input_name` in the given signature. +/// Returns nullptr if the given name is not valid. +/// +/// NOTE: The lifetime of the returned tensor is the same as (and depends on) +/// the lifetime of `signature_runner`. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteTensor* TfLiteSignatureRunnerGetInputTensor( + TfLiteSignatureRunner* signature_runner, const char* input_name); + +/// Runs inference on a given signature. +/// +/// Before calling this function, the caller should first invoke +/// TfLiteSignatureRunnerAllocateTensors() and should also set the values for +/// the input tensors. After successfully calling this function, the values for +/// the output tensors will be set. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteSignatureRunnerInvoke( + TfLiteSignatureRunner* signature_runner); + +/// Returns the number of output tensors associated with the signature. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern size_t TfLiteSignatureRunnerGetOutputCount( + const TfLiteSignatureRunner* signature_runner); + +/// Returns the (null-terminated) name of the Nth output in a signature, where +/// N is specified as `output_index`. +/// +/// NOTE: The lifetime of the returned name is the same as (and depends on) the +/// lifetime of `signature_runner`. +/// +/// WARNING: This is an experimental API and subject to change. 
+TFL_CAPI_EXPORT extern const char* TfLiteSignatureRunnerGetOutputName(
+    const TfLiteSignatureRunner* signature_runner, int32_t output_index);
+
+/// Returns the output tensor identified by `output_name` in the given
+/// signature. Returns nullptr if the given name is not valid.
+///
+/// NOTE: The lifetime of the returned tensor is the same as (and depends on)
+/// the lifetime of `signature_runner`.
+///
+/// WARNING: This is an experimental API and subject to change.
+TFL_CAPI_EXPORT extern const TfLiteTensor* TfLiteSignatureRunnerGetOutputTensor(
+    const TfLiteSignatureRunner* signature_runner, const char* output_name);
+
+/// Attempts to cancel the in-flight invocation, if any.
+/// This will not affect calls to `Invoke` that happen after this.
+/// Non-blocking and thread-safe.
+/// Returns kTfLiteError if cancellation is not enabled, otherwise returns
+/// kTfLiteOk.
+/// NOTE: Calling this function will cancel in-flight invocations
+/// in all SignatureRunners built from the same interpreter.
+///
+/// WARNING: This is an experimental API and subject to change.
+TFL_CAPI_EXPORT extern TfLiteStatus TfLiteSignatureRunnerCancel(
+    TfLiteSignatureRunner* signature_runner);
+
+/// Destroys the signature runner.
+///
+/// WARNING: This is an experimental API and subject to change.
+TFL_CAPI_EXPORT extern void TfLiteSignatureRunnerDelete(
+    TfLiteSignatureRunner* signature_runner);
+
+// Forward declaration, to avoid the need for a dependency on
+// tensorflow/lite/profiling/telemetry/profiler.h.
+struct TfLiteTelemetryProfilerStruct;
+
+/// Registers the telemetry profiler to the interpreter.
+/// Note: The interpreter does not take ownership of the profiler, but callers
+/// must ensure profiler->data outlives the lifespan of the interpreter.
+///
+/// WARNING: This is an experimental API and subject to change.
+TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetTelemetryProfiler( + TfLiteInterpreterOptions* options, + struct TfLiteTelemetryProfilerStruct* profiler); + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif // TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ diff --git a/include/tensorflow/lite/core/c/c_api_opaque.h b/include/tensorflow/lite/core/c/c_api_opaque.h new file mode 100644 index 00000000000..749383b9ffd --- /dev/null +++ b/include/tensorflow/lite/core/c/c_api_opaque.h @@ -0,0 +1,548 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_CORE_C_C_API_OPAQUE_H_ +#define TENSORFLOW_LITE_CORE_C_C_API_OPAQUE_H_ + +#include "tensorflow/lite/core/c/c_api.h" +#include "tensorflow/lite/core/c/c_api_types.h" // IWYU pragma: export +#include "tensorflow/lite/core/c/common.h" + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// -------------------------------------------------------------------------- +/// C API for TensorFlow Lite Opaque Types. +/// +/// These APIs are accessors for TFLite Opaque Types. These APIs are primarily +/// intended to be used by delegates and custom OP implementations. +/// +/// WARNING: This is an experimental API and subject to change. + +// -------------------------------------------------------------------------- +// Accessors for TfLiteOpaqueTensor. 
+ +// Returns the type of a tensor element. +TFL_CAPI_EXPORT extern TfLiteType TfLiteOpaqueTensorType( + const TfLiteOpaqueTensor* opaque_tensor); + +// Returns the number of dimensions that the tensor has. Returns -1 in case +// the 'opaque_tensor' does not have its dimensions property set. +TFL_CAPI_EXPORT extern int32_t TfLiteOpaqueTensorNumDims( + const TfLiteOpaqueTensor* opaque_tensor); + +// Returns the length of the tensor in the "dim_index" dimension. +TFL_CAPI_EXPORT extern int32_t TfLiteOpaqueTensorDim( + const TfLiteOpaqueTensor* opaque_tensor, int32_t dim_index); + +// Loads into the provided 'num_dims' the number of dimensions that the tensor's +// signature has. Returns 'kTfLiteOk' if 'num_dims' was successfully loaded. Any +// other return code indicates an error and 'num_dims' won't be loaded. +// +// A tensor's dimension signature encodes shapes with unknown dimensions with +// -1. E.g. for a tensor with three dimensions, whose first dimension has an +// unknown size, and the second and third dimension have a size of 2, the +// dimension signature is [-1,2,2], and 'TfLiteOpaqueTensorGetNumDimsSignature' +// loads 3 into 'num_dims'. If the tensor does not have its dimension signature +// field set then 'num_dims' is set to -1. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueTensorGetNumDimsSignature( + const TfLiteOpaqueTensor* opaque_tensor, int32_t* num_dims); + +// Loads into the provided 'dim_length' the length of the tensor in the +// 'dim_index' signature dimension or -1 if that dimension has unknown length. +// Returns 'kTfLiteOk' if 'dim_length' was successfully loaded. Any +// other return code indicates an error and 'dim_length' won't be loaded. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueTensorGetDimSignature( + const TfLiteOpaqueTensor* opaque_tensor, int32_t dim_index, + int32_t* dim_length); + +// Returns 'non-zero' if the provided 'opaque_tensor' is a variable, and returns +// zero otherwise. 
+TFL_CAPI_EXPORT extern int TfLiteOpaqueTensorIsVariable( + const TfLiteOpaqueTensor* opaque_tensor); + +// Returns the size of the underlying data in bytes. +TFL_CAPI_EXPORT extern size_t TfLiteOpaqueTensorByteSize( + const TfLiteOpaqueTensor* opaque_tensor); + +// Returns a pointer to the underlying data buffer. +TFL_CAPI_EXPORT extern void* TfLiteOpaqueTensorData( + const TfLiteOpaqueTensor* opaque_tensor); + +// Returns the 'opaque_tensor's allocation type. +TFL_CAPI_EXPORT extern TfLiteAllocationType TfLiteOpaqueTensorGetAllocationType( + const TfLiteOpaqueTensor* opaque_tensor); + +// Returns the (null-terminated) name of the tensor. +TFL_CAPI_EXPORT extern const char* TfLiteOpaqueTensorName( + const TfLiteOpaqueTensor* opaque_tensor); + +// Returns the 'opaque_tensor's quantization information. +TFL_CAPI_EXPORT extern TfLiteQuantization TfLiteOpaqueTensorGetQuantization( + const TfLiteOpaqueTensor* opaque_tensor); + +// Returns the 'opaque_tensor's quantization parameters. +TFL_CAPI_EXPORT extern TfLiteQuantizationParams +TfLiteOpaqueTensorGetQuantizationParams( + const TfLiteOpaqueTensor* opaque_tensor); + +// Copies from the provided input buffer into the tensor's buffer. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueTensorCopyFromBuffer( + TfLiteOpaqueTensor* opaque_tensor, const void* input_data, + size_t input_data_size); + +// Copies to the provided output buffer from the tensor's buffer. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueTensorCopyToBuffer( + const TfLiteOpaqueTensor* opaque_tensor, void* output_data, + size_t output_data_size); + +// -------------------------------------------------------------------------- +// Accessors for TfLiteOpaqueNode. + +// Returns the input tensor of the given node. +TFL_CAPI_EXPORT extern const TfLiteOpaqueTensor* TfLiteOpaqueNodeGetInput( + const TfLiteOpaqueContext* opaque_context, + const TfLiteOpaqueNode* opaque_node, int index); + +// Returns the output tensor of the given node. 
+TFL_CAPI_EXPORT extern TfLiteOpaqueTensor* TfLiteOpaqueNodeGetOutput( + TfLiteOpaqueContext* opaque_context, const TfLiteOpaqueNode* opaque_node, + int index); + +// Gets the number of input tensors of the provided 'opaque_node'. +TFL_CAPI_EXPORT int TfLiteOpaqueNodeNumberOfInputs( + const TfLiteOpaqueNode* opaque_node); + +// Gets the number of output tensors of the provided 'opaque_node'. +TFL_CAPI_EXPORT int TfLiteOpaqueNodeNumberOfOutputs( + const TfLiteOpaqueNode* opaque_node); + +// Returns opaque data provided by the node implementer. The value returned +// from this function is the value that was returned from the `init` callback +// that was passed to `TfLiteRegistrationExternalSetInit`. +TFL_CAPI_EXPORT extern void* TfLiteOpaqueNodeGetUserData( + const TfLiteOpaqueNode* opaque_node); + +// Returns the builtin data associated with the provided 'opaque_node'. +// +// The builtin init data associated with a node would typically be set during +// the creation of the associated interpreter, through a mechanism like the +// interpreter builder that loads a TFLite model and initialises the +// interpreter's nodes accordingly. Under these conditions the returned address +// remains valid throughout the lifetime of the 'opaque_node'. +TFL_CAPI_EXPORT extern void* TfLiteOpaqueNodeGetBuiltinData( + const TfLiteOpaqueNode* opaque_node); + +// Loads into the provided '*init_data' pointer the address of the custom init +// data associated with the provided 'opaque_node'. The length of data is +// loaded into the provided 'size' pointer. Returns 'kTfLiteOk' in case +// of success. Any other return value indicates a failure and will leave +// 'init_data' and 'size' in an unspecified state. +// +// The custom init data associated with a node would typically be set during the +// creation of the associated interpreter, through a mechanism like the +// interpreter builder that loads a TFLite model and initialises the +// interpreter's nodes accordingly. 
Under these conditions the returned address +// remains valid throughout the lifetime of the 'opaque_node'. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueNodeGetCustomInitialData( + const TfLiteOpaqueNode* opaque_node, const void** init_data, int* size); + +// Loads into the provided '*inputs' pointer the starting address of an array +// of indices representing the tensors that are inputs of the provided +// 'opaque_node'. The length of the array is loaded into the provided +// 'num_inputs' pointer. Returns 'kTfLiteOk' in case of success. Any other +// return value indicates a failure and will leave 'inputs' and +// 'num_inputs' in an unspecified state. +// +// The input tensors associated with a node would typically be set during the +// creation of the associated interpreter, through a mechanism like the +// interpreter builder that loads a TFLite model and initialises the +// interpreter's nodes accordingly. Under these conditions the loaded address +// remains valid throughout the lifetime of the 'opaque_node'. +TFL_CAPI_EXPORT TfLiteStatus TfLiteOpaqueNodeInputs( + const TfLiteOpaqueNode* opaque_node, const int** inputs, int* num_inputs); + +// Loads into the provided '*outputs' pointer the starting address of an array +// of indices representing the tensors that are outputs of the provided +// 'opaque_node'. The length of the array is loaded into the provided +// 'num_outputs' pointer. Returns 'kTfLiteOk' in case of success. Any other +// return value indicates a failure and will leave 'outputs' and +// 'num_outputs' in an unspecified state. +// +// The output tensors associated with a node would typically be set during the +// creation of the associated interpreter, through a mechanism like the +// interpreter builder that loads a TFLite model and initialises the +// interpreter's nodes accordingly. Under these conditions the loaded address +// remains valid throughout the lifetime of the 'opaque_node'. 
+TFL_CAPI_EXPORT TfLiteStatus TfLiteOpaqueNodeOutputs( + const TfLiteOpaqueNode* opaque_node, const int** outputs, int* num_outputs); + +// Loads into the provided '*temporaries' pointer the starting address of an +// array of indices representing the temporary tensors associated with the +// provided 'opaque_node'. The length of the array is loaded into the provided +// 'num_temporaries' pointer. Returns 'kTfLiteOk' in case of success. Any other +// return value indicates a failure and will leave 'temporaries' and +// 'num_temporaries' in an unspecified state. +// +// The temporary tensors associated with a node would typically be set during +// the creation of the associated interpreter, through a mechanism like the +// interpreter builder that loads a TFLite model and initialises the +// interpreter's nodes accordingly. Under these conditions the loaded address +// remains valid throughout the lifetime of the 'opaque_node'. +TFL_CAPI_EXPORT +TfLiteStatus TfLiteOpaqueNodeTemporaries(const TfLiteOpaqueNode* opaque_node, + const int** temporaries, + int* num_temporaries); + +// -------------------------------------------------------------------------- +// Accessors for TfLiteOpaqueContext. + +typedef struct TfLiteIntArray TfLiteIntArray; + +// Loads the provided `execution_plan` associated with the provided +// `opaque_context`. Returns `kTfLiteOk` if the `execution_plan` was +// successfully loaded. A return value different from `kTfLiteOk` indicates a +// failure and the `execution_plan` will be left in an unspecified state. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteOpaqueContextGetExecutionPlan( + TfLiteOpaqueContext* opaque_context, TfLiteIntArray** execution_plan); + +// Given the specified 'opaque_context' and 'node_index', load the caller's +// opaque '*node' and '*registration_external' pointer. Return 'kTfLiteOk' if +// both the '*node' as well as the '*registration_external' have been loaded +// correctly. 
Any other return code indicates a failure and both '*node' as
+// well as '*registration_external' will be in an unspecified state.
+//
+// A caller can obtain a node's index by calling
+// 'TfLiteOpaqueContextGetExecutionPlan', which provides an array of node
+// indices, sorted in execution order. A node index might also come from the
+// data structures passed to the delegate kernel's callback parameters, like the
+// delegate parameters data structure passed to the 'init' callback that
+// contains an array of node indices that are meant to be handled by the
+// delegate kernel.
+//
+// This function is expected to be called from within a delegate callback, like
+// 'Prepare', or a delegate kernel callback (i.e., a callback registered with
+// a 'TfLiteRegistrationExternal' object).
+//
+// The loaded '*node' and '*registration_external' pointers will generally
+// remain valid for the lifetime of the associated 'opaque_context', but can be
+// invalidated through API calls where delegates get un-applied, like API calls
+// that modify the model graph via a delegate, or if input tensors get re-sized.
+//
+// TODO(b/237983452): Further clarify the lifetime guarantees of pointers that
+// are returned to the users and which actions invalidate them.
+TFL_CAPI_EXPORT TfLiteStatus TfLiteOpaqueContextGetNodeAndRegistration(
+    struct TfLiteOpaqueContext* opaque_context, int node_index,
+    TfLiteOpaqueNode** node,
+    TfLiteRegistrationExternal** registration_external);
+
+// WARNING: This is an experimental API and subject to change.
+// Entry point for C API ReplaceNodeSubsetsWithDelegateKernels
+//
+// Replaces the specified `nodes_to_replace` that are associated with the
+// provided `opaque_context` with delegate kernels. The provided
+// `registration_external` represents the delegate kernel and will be used for
+// each node subset that will be delegated to the provided `opaque_delegate`.
+// +// The TF Lite runtime will take ownership of the `registration_external` and +// will delete it when the associated `opaque_context` gets destroyed. +// +// The ownership of the `nodes_to_replace` and the `opaque_delegate` remains +// with the caller. +TFL_CAPI_EXPORT TfLiteStatus +TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels( + struct TfLiteOpaqueContext* opaque_context, + TfLiteRegistrationExternal* registration_external, + const TfLiteIntArray* nodes_to_replace, + TfLiteOpaqueDelegate* opaque_delegate); + +// Returns modifiable access to the opaque tensor that corresponds to the +// specified `index` and is associated with the provided `opaque_context`. +// +// This requires the `index` to be between 0 and N - 1, where N is the +// number of tensors in the model. +// +// Typically the tensors associated with the `context` would be set +// during the initialization of the `interpreter` that the `context` belongs to, +// through a mechanism like the `InterpreterBuilder`, and remain unchanged +// throughout the lifetime of the interpreter. However, there are some +// circumstances in which the pointer may not remain valid throughout the +// lifetime of the interpreter, because calls to `AddTensors` on the interpreter +// invalidate the returned pointer. +// +// The ownership of the tensor remains with the TFLite runtime, meaning the +// caller should not deallocate the pointer. +TFL_CAPI_EXPORT +TfLiteOpaqueTensor* TfLiteOpaqueContextGetOpaqueTensor( + const TfLiteOpaqueContext* opaque_context, int index); + +// Loads into the provided '*inputs' pointer the starting address of an array +// of indices representing the tensors that are inputs to the subgraph that is +// associated with the provided 'opaque_context'. The length of the array is +// loaded into the provided 'num_inputs' pointer. Returns 'kTfLiteOk' in case +// of success. Any other return value indicates a failure and will leave +// 'inputs' and 'num_inputs' in an unspecified state. 
Calls to 'SetInputs' on +// the associated subgraph invalidate the loaded pointers. +TFL_CAPI_EXPORT +TfLiteStatus TfLiteOpaqueContextGetInputs( + const struct TfLiteOpaqueContext* opaque_context, const int** inputs, + int* num_inputs); + +// Loads into the provided '*outputs' pointer the starting address of an array +// of indices representing the tensors that are outputs to the subgraph that is +// associated with the provided 'opaque_context'. The length of the array is +// loaded into the provided 'num_outputs' pointer. Returns 'kTfLiteOk' in case +// of success. Any other return value indicates a failure and will leave +// 'outputs' and 'num_outputs' in an unspecified state. Calls to 'SetOutputs' +// on the associated subgraph invalidate the loaded pointers. +TFL_CAPI_EXPORT +TfLiteStatus TfLiteOpaqueContextGetOutputs( + const struct TfLiteOpaqueContext* opaque_context, const int** outputs, + int* num_outputs); + +// Loads into the provided '*variables' pointer the starting address of an array +// of indices representing the tensors that are variables to the subgraph that +// is associated with the provided 'opaque_context'. The length of the array is +// loaded into the provided 'num_variables' pointer. Returns 'kTfLiteOk' in +// case of success. Any other return value indicates a failure and will leave +// 'variables' and 'num_variables' in an unspecified state. Calls to +// 'SetVariables' on the associated subgraph invalidate the loaded pointers. +TFL_CAPI_EXPORT +TfLiteStatus TfLiteOpaqueContextGetVariables( + const struct TfLiteOpaqueContext* opaque_context, const int** variables, + int* num_variables); + +// Returns the number of nodes associated with the provided 'opaque_context'. +TFL_CAPI_EXPORT +size_t TfLiteOpaqueContextGetNumNodes( + const struct TfLiteOpaqueContext* opaque_context); + +// Returns the number of tensors associated with the provided 'opaque_context'. 
+TFL_CAPI_EXPORT +size_t TfLiteOpaqueContextGetNumTensors( + const struct TfLiteOpaqueContext* opaque_context); + +// Returns the name of the subgraph that is associated with the provided +// 'opaque_context'. Typically the returned pointer will remain valid +// throughout the lifetime of the subgraph, but may be invalidated by a call to +// 'Subgraph::SetName'. +TFL_CAPI_EXPORT +const char* TfLiteOpaqueContextGetName( + const struct TfLiteOpaqueContext* opaque_context); + +// Resizes the provided 'tensor' that is associated with the provided +// 'context' so that the 'tensor's shape matches the dimensionality specified +// via the provided 'new_size' array. Returns 'kTfLiteOk' in +// case of success. Any other return value indicates a failure and will leave +// the 'tensor' in an unspecified state. The TF Lite runtime takes ownership +// of the 'new_size' array, even in case of failure. +TFL_CAPI_EXPORT +TfLiteStatus TfLiteOpaqueContextResizeTensor(TfLiteOpaqueContext* context, + TfLiteOpaqueTensor* tensor, + TfLiteIntArray* new_size); + +// Entry point for C API GetSubgraphContext. +// +// Retrieves the corresponding TfLiteOpaqueContext of a subgraph given a +// subgraph index. If an invalid subgraph index is given, then returns nullptr. +TFL_CAPI_EXPORT +TfLiteOpaqueContext* TfLiteOpaqueContextGetSubgraphContext( + struct TfLiteOpaqueContext* opaque_context, int subgraph_index); + +// Entry point for C API MarkSubgraphAsDelegationSkippable +// +// Marks the subgraph with the given index as "delegation-skippable". Returns +// kTfLiteOk if the given subgraph index is valid and is successfully marked +// as delegation-skippable, and an error status if the subgraph index is +// invalid. +// If a subgraph is delegation-skippable, then the subgraph will be handled by a +// TfLiteOpaqueDelegate (and that the delegate is supposed to be already aware +// of this state), and therefore, TfLiteInterpreter can skip invoking +// `ModifyGraphWithDelegate` on this subgraph. 
+// NOTE: This function is expected to be called only when the subgraph that
+// `subgraph_index` is pointing to should be skipped by
+// interpreter::ModifyGraphWithDelegate (e.g. the subgraph is part of the list
+// of callee subgraphs of the same control flow node, and all of those callees
+// are supported by the same delegate at once).
+//
+// For example, this function can be used when the delegate is handling control
+// flow ops like while op.
+// E.g. A while op has condition subgraph indexed at `i` and body subgraph
+// indexed at `j`. The op can be delegated when the following conditions are
+// satisfied:
+//   1. The delegate supports while op
+//   2. Both condition subgraph `i` and body subgraph `j` can be fully delegated
+//      by the delegate.
+// Then if the delegate decides to support the while node along with both body
+// and condition subgraphs, it should mark subgraphs `i` and `j` skippable so
+// those two subgraphs won't be delegated separately again after being
+// absorbed by the parent subgraph.
+// WARNING: It is the delegate's responsibility to define when to skip
+// subgraph->ModifyGraphWithDelegate, to check any edge cases (i.e. multiple
+// references to the subgraph that `subgraph_index` is pointing to), and to mark
+// that subgraph as skippable using this function.
+TFL_CAPI_EXPORT
+TfLiteStatus TfLiteOpaqueContextMarkSubgraphAsDelegationSkippable(
+    TfLiteOpaqueContext* opaque_context, int subgraph_index);
+
+// Reports an error message formed by using the provided 'format' string in
+// combination with the data provided via the unnamed arguments following
+// the 'format' parameter ('...'). The intended usage and behavior is the same
+// as with 'printf' with regards to how the data and the formatting string
+// interact. E.g.
+// 'TfLiteOpaqueContextReportError(opaque_context, "a=%d b=%d", a, b);'
+//
+// The provided 'opaque_context' will be used for reporting the resulting error
+// message.
+// +// Note that TF Lite clients can use macros like 'TF_LITE_OPAQUE_ENSURE' to +// check for certain conditions to be true, and print an error message if the +// condition does not hold. Direct usage of this function from application code +// should therefore be rare. +TFL_CAPI_EXPORT +void TfLiteOpaqueContextReportError(struct TfLiteOpaqueContext* opaque_context, + const char* format, ...); + +// Same as 'TfLiteOpaqueContextReportError', but with the variable arguments +// passed via a 'va_list' instead of directly. +// +// Callers that receive an ellipsis and want to forward it to +// to the opaque context error reporting API can add the ellipsis content to a +// 'va_list' and then call 'TfLiteOpaqueContextReportErrorVa'. E.g.: +// +// void MyErrorReporter(struct TfLiteOpaqueContext* opaque_context, +// const char* format, ...) { +// va_list vlist; +// va_start(vlist, format); +// TfLiteOpaqueContextReportErrorVa(opaque_context, format, vlist); +// va_end(vlist); +// } +TFL_CAPI_EXPORT +void TfLiteOpaqueContextReportErrorVa( + struct TfLiteOpaqueContext* opaque_context, const char* format, + va_list vlist); + +// Since we must not depend on any libraries, define a minimal subset of +// error macros while avoiding names that have pre-conceived meanings like +// assert and check. + +// Try to make all reporting calls through TF_LITE_OPAQUE_KERNEL_LOG rather than +// calling the TfLiteOpaqueContextReportError function directly, so that message +// strings can be stripped out if the binary size needs to be severely +// optimized. +#ifndef TF_LITE_STRIP_ERROR_STRINGS + +#if !defined(TF_LITE_OPAQUE_KERNEL_LOG) +#define TF_LITE_OPAQUE_KERNEL_LOG(opaque_context, ...) \ + do { \ + TfLiteOpaqueContextReportError((opaque_context), __VA_ARGS__); \ + } while (false) +#endif + +#if !defined(TF_LITE_OPAQUE_MAYBE_KERNEL_LOG) +#define TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(opaque_context, ...) 
\ + do { \ + if ((opaque_context) != nullptr) { \ + TfLiteOpaqueContextReportError((opaque_context), __VA_ARGS__); \ + } \ + } while (false) +#endif + +#else // TF_LITE_STRIP_ERROR_STRINGS +#define ARGS_UNUSED(...) (void)sizeof(#__VA_ARGS__) + +#if !defined(TF_LITE_OPAQUE_MAYBE_KERNEL_LOG) +#define TF_LITE_OPAQUE_KERNEL_LOG(opaque_context, ...) ARGS_UNUSED(__VA_ARGS__) +#endif + +#if !defined(TF_LITE_OPAQUE_MAYBE_KERNEL_LOG) +#define TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(opaque_context, ...) \ + ARGS_UNUSED(__VA_ARGS__) +#endif + +#endif // TF_LITE_STRIP_ERROR_STRINGS + +// Check whether value is true, and if not return kTfLiteError from +// the current function (and report the error string msg). +#if !defined(TF_LITE_OPAQUE_ENSURE_MSG) +#define TF_LITE_OPAQUE_ENSURE_MSG(opaque_context, value, msg) \ + do { \ + if (!(value)) { \ + TF_LITE_OPAQUE_KERNEL_LOG((opaque_context), __FILE__ " " msg); \ + return kTfLiteError; \ + } \ + } while (0) +#endif + +// Check whether the value `a` is true, and if not return kTfLiteError from +// the current function, while also reporting the location of the error. +#if !defined(TF_LITE_OPAQUE_ENSURE) +#define TF_LITE_OPAQUE_ENSURE(opaque_context, a) \ + do { \ + if (!(a)) { \ + TF_LITE_OPAQUE_KERNEL_LOG(opaque_context, "%s:%d: %s was not true.", \ + __FILE__, __LINE__, #a); \ + return kTfLiteError; \ + } \ + } while (0) +#endif + +// Check whether the value `a == b` is true, and if not return kTfLiteError from +// the current function, while also reporting the location of the error. +// `a` and `b` may be evaluated more than once, so no side effects or +// extremely expensive computations should be done. +// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes. 
+#if !defined(TF_LITE_OPAQUE_ENSURE_EQ) +#define TF_LITE_OPAQUE_ENSURE_EQ(opaque_context, a, b) \ + do { \ + if ((a) != (b)) { \ + TF_LITE_OPAQUE_KERNEL_LOG((opaque_context), \ + "%s:%d: %s != %s (%d != %d)", __FILE__, \ + __LINE__, #a, #b, (a), (b)); \ + return kTfLiteError; \ + } \ + } while (0) +#endif + +#if !defined(TF_LITE_OPAQUE_ENSURE_TYPES_EQ) +#define TF_LITE_OPAQUE_ENSURE_TYPES_EQ(opaque_context, a, b) \ + do { \ + if ((a) != (b)) { \ + TF_LITE_OPAQUE_KERNEL_LOG( \ + (opaque_context), "%s:%d: %s != %s (%s != %s)", __FILE__, __LINE__, \ + #a, #b, TfLiteTypeGetName(a), TfLiteTypeGetName(b)); \ + return kTfLiteError; \ + } \ + } while (0) +#endif + +#if !defined(TF_LITE_OPAQUE_ENSURE_NEAR) +#define TF_LITE_OPAQUE_ENSURE_NEAR(opaque_context, a, b, epsilon) \ + do { \ + double delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a)); \ + if (delta > epsilon) { \ + TF_LITE_OPAQUE_KERNEL_LOG((opaque_context), \ + "%s:%d: %s not near %s (%f != %f)", __FILE__, \ + __LINE__, #a, #b, (double)(a), (double)(b)); \ + return kTfLiteError; \ + } \ + } while (0) +#endif + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif // TENSORFLOW_LITE_CORE_C_C_API_OPAQUE_H_ diff --git a/include/tensorflow/lite/core/c/c_api_types.h b/include/tensorflow/lite/core/c/c_api_types.h new file mode 100644 index 00000000000..670ec1ee553 --- /dev/null +++ b/include/tensorflow/lite/core/c/c_api_types.h @@ -0,0 +1,169 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// This file declares types used by the pure C inference API defined in c_api.h,
+// some of which are also used in the C++ and C kernel and interpreter APIs.
+
+/// WARNING: Users of TensorFlow Lite should not include this file directly,
+/// but should instead include
+/// "third_party/tensorflow/lite/c/c_api_types.h".
+/// Only the TensorFlow Lite implementation itself should include this
+/// file directly.
+// IWYU pragma: private, include "third_party/tensorflow/lite/c/c_api_types.h"
+
+#ifndef TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_
+#define TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Define TFL_CAPI_EXPORT macro to export a function properly with a shared
+// library.
+#ifdef SWIG
+#define TFL_CAPI_EXPORT
+#elif defined(TFL_STATIC_LIBRARY_BUILD)
+#define TFL_CAPI_EXPORT
+#else  // not defined TFL_STATIC_LIBRARY_BUILD
+#if defined(_WIN32)
+#ifdef TFL_COMPILE_LIBRARY
+#define TFL_CAPI_EXPORT __declspec(dllexport)
+#else
+#define TFL_CAPI_EXPORT __declspec(dllimport)
+#endif  // TFL_COMPILE_LIBRARY
+#else
+#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
+#endif  // _WIN32
+#endif  // SWIG
+
+// Note that new error status values may be added in future in order to
+// indicate more fine-grained internal states, therefore, applications should
+// not rely on status values being members of the enum.
+typedef enum TfLiteStatus {
+  kTfLiteOk = 0,
+
+  // Generally referring to an error in the runtime (i.e. interpreter)
+  kTfLiteError = 1,
+
+  // Generally referring to an error from a TfLiteDelegate itself.
+ kTfLiteDelegateError = 2, + + // Generally referring to an error in applying a delegate due to + // incompatibility between runtime and delegate, e.g., this error is returned + // when trying to apply a TF Lite delegate onto a model graph that's already + // immutable. + kTfLiteApplicationError = 3, + + // Generally referring to serialized delegate data not being found. + // See tflite::delegates::Serialization. + kTfLiteDelegateDataNotFound = 4, + + // Generally referring to data-writing issues in delegate serialization. + // See tflite::delegates::Serialization. + kTfLiteDelegateDataWriteError = 5, + + // Generally referring to data-reading issues in delegate serialization. + // See tflite::delegates::Serialization. + kTfLiteDelegateDataReadError = 6, + + // Generally referring to issues when the TF Lite model has ops that cannot be + // resolved at runtime. This could happen when the specific op is not + // registered or built with the TF Lite framework. + kTfLiteUnresolvedOps = 7, + + // Generally referring to invocation cancelled by the user. + // See `interpreter::Cancel`. + // TODO(b/194915839): Implement `interpreter::Cancel`. + // TODO(b/250636993): Cancellation triggered by `SetCancellationFunction` + // should also return this status code. + kTfLiteCancelled = 8, +} TfLiteStatus; + +// Types supported by tensor +typedef enum { + kTfLiteNoType = 0, + kTfLiteFloat32 = 1, + kTfLiteInt32 = 2, + kTfLiteUInt8 = 3, + kTfLiteInt64 = 4, + kTfLiteString = 5, + kTfLiteBool = 6, + kTfLiteInt16 = 7, + kTfLiteComplex64 = 8, + kTfLiteInt8 = 9, + kTfLiteFloat16 = 10, + kTfLiteFloat64 = 11, + kTfLiteComplex128 = 12, + kTfLiteUInt64 = 13, + kTfLiteResource = 14, + kTfLiteVariant = 15, + kTfLiteUInt32 = 16, + kTfLiteUInt16 = 17, + kTfLiteInt4 = 18, +} TfLiteType; + +// Legacy. Will be deprecated in favor of TfLiteAffineQuantization. +// If per-layer quantization is specified this field will still be populated in +// addition to TfLiteAffineQuantization. 
+// Parameters for asymmetric quantization. Quantized values can be converted +// back to float using: +// real_value = scale * (quantized_value - zero_point) +typedef struct TfLiteQuantizationParams { + float scale; + int32_t zero_point; +} TfLiteQuantizationParams; + +// -------------------------------------------------------------------------- +// Opaque types used by c_api.h, c_api_opaque.h and common.h. + +// TfLiteOpaqueContext is an opaque version of TfLiteContext; +typedef struct TfLiteOpaqueContext TfLiteOpaqueContext; + +// TfLiteOpaqueNode is an opaque version of TfLiteNode; +typedef struct TfLiteOpaqueNode TfLiteOpaqueNode; + +// TfLiteOpaqueTensor is an opaque version of TfLiteTensor; +typedef struct TfLiteOpaqueTensor TfLiteOpaqueTensor; + +// TfLiteDelegate: allows delegation of nodes to alternative backends. +// Forward declaration of concrete type declared in common.h. +typedef struct TfLiteDelegate TfLiteDelegate; + +// TfLiteOpaqueDelegateStruct: unconditionally opaque version of +// TfLiteDelegate; allows delegation of nodes to alternative backends. +// +// This is an abstract type that is intended to have the same +// role as TfLiteDelegate, but without exposing the implementation +// details of how delegates are implemented. +// WARNING: This is an experimental type and subject to change. +typedef struct TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegateStruct; + +// TfLiteOpaqueDelegate: conditionally opaque version of +// TfLiteDelegate; allows delegation of nodes to alternative backends. +// For TF Lite in Play Services, this is an opaque type, +// but for regular TF Lite, this is just a typedef for TfLiteDelegate. +// WARNING: This is an experimental type and subject to change. 
+#if TFLITE_WITH_STABLE_ABI || TFLITE_USE_OPAQUE_DELEGATE +typedef TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegate; +#else +typedef TfLiteDelegate TfLiteOpaqueDelegate; +#endif + +#ifdef __cplusplus +} // extern C +#endif +#endif // TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_ diff --git a/include/tensorflow/lite/core/c/common.h b/include/tensorflow/lite/core/c/common.h new file mode 100644 index 00000000000..179d8cb902b --- /dev/null +++ b/include/tensorflow/lite/core/c/common.h @@ -0,0 +1,1205 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// This file defines common C types and APIs for implementing operations, +// delegates and other constructs in TensorFlow Lite. The actual operations and +// delegates can be defined using C++, but the interface between the interpreter +// and the operations are C. +// +// Summary of abstractions +// TF_LITE_ENSURE - Self-sufficient error checking +// TfLiteStatus - Status reporting +// TfLiteIntArray - stores tensor shapes (dims), +// TfLiteContext - allows an op to access the tensors +// TfLiteTensor - tensor (a multidimensional array) +// TfLiteNode - a single node or operation +// TfLiteRegistration - the implementation of a conceptual operation. +// TfLiteDelegate - allows delegation of nodes to alternative backends. +// +// Some abstractions in this file are created and managed by Interpreter. 
+//
+// NOTE: The order of values in these structs are "semi-ABI stable". New values
+// should be added only to the end of structs and never reordered.
+
+/// WARNING: Users of TensorFlow Lite should not include this file directly,
+/// but should instead include
+/// "third_party/tensorflow/lite/c/common.h".
+/// Only the TensorFlow Lite implementation itself should include this
+/// file directly.
+// IWYU pragma: private, include "third_party/tensorflow/lite/c/common.h"
+
+#ifndef TENSORFLOW_LITE_CORE_C_COMMON_H_
+#define TENSORFLOW_LITE_CORE_C_COMMON_H_
+
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "tensorflow/lite/core/c/c_api_types.h"  // IWYU pragma: export
+
+#ifdef __cplusplus
+extern "C" {
+#endif  // __cplusplus
+
+// The list of external context types known to TF Lite. This list exists solely
+// to avoid conflicts and to ensure ops can share the external contexts they
+// need. Access to the external contexts is controlled by one of the
+// corresponding support files.
+typedef enum TfLiteExternalContextType {
+  kTfLiteEigenContext = 0,       // include eigen_support.h to use.
+  kTfLiteGemmLowpContext = 1,    // include gemm_support.h to use.
+  kTfLiteEdgeTpuContext = 2,     // Placeholder for Edge TPU support.
+  kTfLiteCpuBackendContext = 3,  // include cpu_backend_context.h to use.
+  kTfLiteMaxExternalContexts = 4
+} TfLiteExternalContextType;
+
+// Forward declare so dependent structs and methods can reference these types
+// prior to the struct definitions.
+struct TfLiteContext;
+struct TfLiteDelegate;
+struct TfLiteRegistration;
+struct TfLiteOpaqueDelegateBuilder;
+
+// An external context is a collection of information unrelated to the TF Lite
+// framework, but useful to a subset of the ops. TF Lite knows very little
+// about the actual contexts, but it keeps a list of them, and is able to
+// refresh them if configurations like the number of recommended threads
+// change.
+typedef struct TfLiteExternalContext { + TfLiteExternalContextType type; + TfLiteStatus (*Refresh)(struct TfLiteContext* context); +} TfLiteExternalContext; + +#define kTfLiteOptionalTensor (-1) + +// Fixed size list of integers. Used for dimensions and inputs/outputs tensor +// indices +typedef struct TfLiteIntArray { + int size; + +#if defined(_MSC_VER) + // Context for why this is needed is in http://b/189926408#comment21 + int data[1]; +#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ + __GNUC_MINOR__ >= 1) || \ + defined(HEXAGON) || \ + (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1) + // gcc 6.1+ have a bug where flexible members aren't properly handled + // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c + int data[0]; +#else + int data[]; +#endif +} TfLiteIntArray; + +// Given the size (number of elements) in a TfLiteIntArray, calculate its size +// in bytes. +size_t TfLiteIntArrayGetSizeInBytes(int size); + +#ifndef TF_LITE_STATIC_MEMORY +// Create a array of a given `size` (uninitialized entries). +// This returns a pointer, that you must free using TfLiteIntArrayFree(). +TfLiteIntArray* TfLiteIntArrayCreate(int size); +#endif + +// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise. +int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b); + +// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise. +int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, + const int b_data[]); + +#ifndef TF_LITE_STATIC_MEMORY +// Create a copy of an array passed as `src`. +// You are expected to free memory with TfLiteIntArrayFree +TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src); + +// Free memory of array `a`. +void TfLiteIntArrayFree(TfLiteIntArray* a); +#endif // TF_LITE_STATIC_MEMORY + +// Fixed size list of floats. Used for per-channel quantization. 
+typedef struct TfLiteFloatArray { + int size; +#if defined(_MSC_VER) + // Context for why this is needed is in http://b/189926408#comment21 + float data[1]; +#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ + __GNUC_MINOR__ >= 1) || \ + defined(HEXAGON) || \ + (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1) + // gcc 6.1+ have a bug where flexible members aren't properly handled + // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c + float data[0]; +#else + float data[]; +#endif +} TfLiteFloatArray; + +// Given the size (number of elements) in a TfLiteFloatArray, calculate its size +// in bytes. +int TfLiteFloatArrayGetSizeInBytes(int size); + +#ifndef TF_LITE_STATIC_MEMORY +// Create a array of a given `size` (uninitialized entries). +// This returns a pointer, that you must free using TfLiteFloatArrayFree(). +TfLiteFloatArray* TfLiteFloatArrayCreate(int size); + +// Create a copy of an array passed as `src`. +// You are expected to free memory with TfLiteFloatArrayFree. +TfLiteFloatArray* TfLiteFloatArrayCopy(const TfLiteFloatArray* src); + +// Free memory of array `a`. +void TfLiteFloatArrayFree(TfLiteFloatArray* a); +#endif // TF_LITE_STATIC_MEMORY + +// Since we must not depend on any libraries, define a minimal subset of +// error macros while avoiding names that have pre-conceived meanings like +// assert and check. + +// Try to make all reporting calls through TF_LITE_KERNEL_LOG rather than +// calling the context->ReportError function directly, so that message strings +// can be stripped out if the binary size needs to be severely optimized. +#ifndef TF_LITE_STRIP_ERROR_STRINGS +#define TF_LITE_KERNEL_LOG(context, ...) \ + do { \ + (context)->ReportError((context), __VA_ARGS__); \ + } while (false) + +#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) 
\ + do { \ + if ((context) != nullptr) { \ + (context)->ReportError((context), __VA_ARGS__); \ + } \ + } while (false) +#else // TF_LITE_STRIP_ERROR_STRINGS +#define ARGS_UNUSED(...) (void)sizeof(#__VA_ARGS__) +#define TF_LITE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__) +#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__) +#endif // TF_LITE_STRIP_ERROR_STRINGS + +// Check whether value is true, and if not return kTfLiteError from +// the current function (and report the error string msg). +#define TF_LITE_ENSURE_MSG(context, value, msg) \ + do { \ + if (!(value)) { \ + TF_LITE_KERNEL_LOG((context), __FILE__ " " msg); \ + return kTfLiteError; \ + } \ + } while (0) + +// Check whether the value `a` is true, and if not return kTfLiteError from +// the current function, while also reporting the location of the error. +#define TF_LITE_ENSURE(context, a) \ + do { \ + if (!(a)) { \ + TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, \ + __LINE__, #a); \ + return kTfLiteError; \ + } \ + } while (0) + +#define TF_LITE_ENSURE_STATUS(a) \ + do { \ + const TfLiteStatus s = (a); \ + if (s != kTfLiteOk) { \ + return s; \ + } \ + } while (0) + +// Check whether the value `a == b` is true, and if not return kTfLiteError from +// the current function, while also reporting the location of the error. +// `a` and `b` may be evaluated more than once, so no side effects or +// extremely expensive computations should be done. +// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes. 
+#define TF_LITE_ENSURE_EQ(context, a, b) \ + do { \ + if ((a) != (b)) { \ + TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%d != %d)", __FILE__, \ + __LINE__, #a, #b, (a), (b)); \ + return kTfLiteError; \ + } \ + } while (0) + +#define TF_LITE_ENSURE_TYPES_EQ(context, a, b) \ + do { \ + if ((a) != (b)) { \ + TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%s != %s)", __FILE__, \ + __LINE__, #a, #b, TfLiteTypeGetName(a), \ + TfLiteTypeGetName(b)); \ + return kTfLiteError; \ + } \ + } while (0) + +#define TF_LITE_ENSURE_NEAR(context, a, b, epsilon) \ + do { \ + auto delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a)); \ + if (delta > epsilon) { \ + TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)", \ + __FILE__, __LINE__, #a, #b, static_cast(a), \ + static_cast(b)); \ + return kTfLiteError; \ + } \ + } while (0) + +#define TF_LITE_ENSURE_OK(context, status) \ + do { \ + const TfLiteStatus s = (status); \ + if ((s) != kTfLiteOk) { \ + return s; \ + } \ + } while (0) + +// Single-precision complex data type compatible with the C99 definition. +typedef struct TfLiteComplex64 { + float re, im; // real and imaginary parts, respectively. +} TfLiteComplex64; + +// Double-precision complex data type compatible with the C99 definition. +typedef struct TfLiteComplex128 { + double re, im; // real and imaginary parts, respectively. +} TfLiteComplex128; + +// Half precision data type compatible with the C99 definition. +typedef struct TfLiteFloat16 { + uint16_t data; +} TfLiteFloat16; + +// Return the name of a given type, for error reporting purposes. +const char* TfLiteTypeGetName(TfLiteType type); + +// SupportedQuantizationTypes. +typedef enum TfLiteQuantizationType { + // No quantization. + kTfLiteNoQuantization = 0, + // Affine quantization (with support for per-channel quantization). + // Corresponds to TfLiteAffineQuantization. + kTfLiteAffineQuantization = 1, +} TfLiteQuantizationType; + +// Structure specifying the quantization used by the tensor, if-any. 
+typedef struct TfLiteQuantization { + // The type of quantization held by params. + TfLiteQuantizationType type; + // Holds an optional reference to a quantization param structure. The actual + // type depends on the value of the `type` field (see the comment there for + // the values and corresponding types). + void* params; +} TfLiteQuantization; + +// Parameters for asymmetric quantization across a dimension (i.e per output +// channel quantization). +// quantized_dimension specifies which dimension the scales and zero_points +// correspond to. +// For a particular value in quantized_dimension, quantized values can be +// converted back to float using: +// real_value = scale * (quantized_value - zero_point) +typedef struct TfLiteAffineQuantization { + TfLiteFloatArray* scale; + TfLiteIntArray* zero_point; + int32_t quantized_dimension; +} TfLiteAffineQuantization; + +/* A union of pointers that points to memory for a given tensor. */ +typedef union TfLitePtrUnion { + /* Do not access these members directly, if possible, use + * GetTensorData(tensor) instead, otherwise only access .data, as other + * members are deprecated. */ + int32_t* i32; + uint32_t* u32; + int64_t* i64; + uint64_t* u64; + float* f; + TfLiteFloat16* f16; + double* f64; + char* raw; + const char* raw_const; + uint8_t* uint8; + bool* b; + int16_t* i16; + uint16_t* ui16; + TfLiteComplex64* c64; + TfLiteComplex128* c128; + int8_t* int8; + /* Only use this member. */ + void* data; +} TfLitePtrUnion; + +// Memory allocation strategies. +// * kTfLiteMmapRo: Read-only memory-mapped data, or data externally allocated. +// * kTfLiteArenaRw: Arena allocated with no guarantees about persistence, +// and available during eval. +// * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and +// only available during eval. +// * kTfLiteDynamic: Allocated during eval, or for string tensors. +// * kTfLitePersistentRo: Allocated and populated during prepare. 
This is +// useful for tensors that can be computed during prepare and treated +// as constant inputs for downstream ops (also in prepare). +// * kTfLiteCustom: Custom memory allocation provided by the user. See +// TfLiteCustomAllocation below. +typedef enum TfLiteAllocationType { + kTfLiteMemNone = 0, + kTfLiteMmapRo, + kTfLiteArenaRw, + kTfLiteArenaRwPersistent, + kTfLiteDynamic, + kTfLitePersistentRo, + kTfLiteCustom, +} TfLiteAllocationType; + +// The delegates should use zero or positive integers to represent handles. +// -1 is reserved from unallocated status. +typedef int TfLiteBufferHandle; +enum { + kTfLiteNullBufferHandle = -1, +}; + +// Storage format of each dimension in a sparse tensor. +typedef enum TfLiteDimensionType { + kTfLiteDimDense = 0, + kTfLiteDimSparseCSR, +} TfLiteDimensionType; + +// Metadata to encode each dimension in a sparse tensor. +typedef struct TfLiteDimensionMetadata { + TfLiteDimensionType format; + int dense_size; + TfLiteIntArray* array_segments; + TfLiteIntArray* array_indices; +} TfLiteDimensionMetadata; + +// Parameters used to encode a sparse tensor. For detailed explanation of each +// field please refer to lite/schema/schema.fbs. +typedef struct TfLiteSparsity { + TfLiteIntArray* traversal_order; + TfLiteIntArray* block_map; + TfLiteDimensionMetadata* dim_metadata; + int dim_metadata_size; +} TfLiteSparsity; + +// Defines a custom memory allocation not owned by the runtime. +// `data` should be aligned to kDefaultTensorAlignment defined in +// lite/util.h. (Currently 64 bytes) +// NOTE: See Interpreter.SetCustomAllocationForTensor for details on usage. +typedef struct TfLiteCustomAllocation { + void* data; + size_t bytes; +} TfLiteCustomAllocation; + +// The flags used in `Interpreter::SetCustomAllocationForTensor`. +// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. 
+typedef enum TfLiteCustomAllocationFlags { + kTfLiteCustomAllocationFlagsNone = 0, + // Skips checking whether allocation.data points to an aligned buffer as + // expected by the TFLite runtime. + // NOTE: Setting this flag can cause crashes when calling Invoke(). + // Use with caution. + kTfLiteCustomAllocationFlagsSkipAlignCheck = 1, +} TfLiteCustomAllocationFlags; + +// A tensor in the interpreter system which is a wrapper around a buffer of +// data including a dimensionality (or NULL if not currently defined). +#ifndef TF_LITE_STATIC_MEMORY +typedef struct TfLiteTensor { + // The data type specification for data stored in `data`. This affects + // what member of `data` union should be used. + TfLiteType type; + // A union of data pointers. The appropriate type should be used for a typed + // tensor based on `type`. + TfLitePtrUnion data; + // A pointer to a structure representing the dimensionality interpretation + // that the buffer should have. NOTE: the product of elements of `dims` + // and the element datatype size should be equal to `bytes` below. + TfLiteIntArray* dims; + // Quantization information. + TfLiteQuantizationParams params; + // How memory is mapped + // kTfLiteMmapRo: Memory mapped read only. + // i.e. weights + // kTfLiteArenaRw: Arena allocated read write memory + // (i.e. temporaries, outputs). + TfLiteAllocationType allocation_type; + // The number of bytes required to store the data of this Tensor. I.e. + // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if + // type is kTfLiteFloat32 and dims = {3, 2} then + // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. + size_t bytes; + + // An opaque pointer to a tflite::MMapAllocation + const void* allocation; + + // Null-terminated name of this tensor. + const char* name; + + // The delegate which knows how to handle `buffer_handle`. + // WARNING: This is an experimental interface that is subject to change. 
+ struct TfLiteDelegate* delegate; + + // An integer buffer handle that can be handled by `delegate`. + // The value is valid only when delegate is not null. + // WARNING: This is an experimental interface that is subject to change. + TfLiteBufferHandle buffer_handle; + + // If the delegate uses its own buffer (e.g. GPU memory), the delegate is + // responsible to set data_is_stale to true. + // `delegate->CopyFromBufferHandle` can be called to copy the data from + // delegate buffer. + // WARNING: This is an // experimental interface that is subject to change. + bool data_is_stale; + + // True if the tensor is a variable. + bool is_variable; + + // Quantization information. Replaces params field above. + TfLiteQuantization quantization; + + // Parameters used to encode a sparse tensor. + // This is optional. The field is NULL if a tensor is dense. + // WARNING: This is an experimental interface that is subject to change. + TfLiteSparsity* sparsity; + + // Optional. Encodes shapes with unknown dimensions with -1. This field is + // only populated when unknown dimensions exist in a read-write tensor (i.e. + // an input or output tensor). (e.g. `dims` contains [1, 1, 1, 3] and + // `dims_signature` contains [1, -1, -1, 3]). If no unknown dimensions exist + // then `dims_signature` is either null, or set to an empty array. Note that + // this field only exists when TF_LITE_STATIC_MEMORY is not defined. + const TfLiteIntArray* dims_signature; +} TfLiteTensor; + +// A structure representing an instance of a node. +// This structure only exhibits the inputs, outputs, user defined data and some +// node properties (like statefulness), not other features like the type. +typedef struct TfLiteNode { + // Inputs to this node expressed as indices into the simulator's tensors. + TfLiteIntArray* inputs; + + // Outputs to this node expressed as indices into the simulator's tensors. 
+ TfLiteIntArray* outputs; + + // intermediate tensors to this node expressed as indices into the simulator's + // tensors. + TfLiteIntArray* intermediates; + + // Temporary tensors uses during the computations. This usually contains no + // tensors, but ops are allowed to change that if they need scratch space of + // any sort. + TfLiteIntArray* temporaries; + + // Opaque data provided by the node implementer through `Registration.init`. + void* user_data; + + // Opaque data provided to the node if the node is a builtin. This is usually + // a structure defined in builtin_op_data.h + void* builtin_data; + + // Custom initial data. This is the opaque data provided in the flatbuffer. + // WARNING: This is an experimental interface that is subject to change. + const void* custom_initial_data; + int custom_initial_data_size; + + // The pointer to the delegate. This is non-null only when the node is + // created by calling `interpreter.ModifyGraphWithDelegate`. + // WARNING: This is an experimental interface that is subject to change. + struct TfLiteDelegate* delegate; + + // Whether this op might have side effect (e.g. stateful op). + bool might_have_side_effect; +} TfLiteNode; +#else // defined(TF_LITE_STATIC_MEMORY)? +// NOTE: This flag is opt-in only at compile time. +// +// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct +// contains only the minimum fields required to initialize and prepare a micro +// inference graph. The fields in this struct have been ordered from +// largest-to-smallest for optimal struct sizeof. +// +// This struct does not use: +// - allocation +// - buffer_handle +// - data_is_stale +// - delegate +// - dims_signature +// - name +// - sparsity +typedef struct TfLiteTensor { + // TODO(b/155784997): Consider consolidating these quantization fields: + // Quantization information. Replaces params field above. + TfLiteQuantization quantization; + + // Quantization information. 
+ TfLiteQuantizationParams params; + + // A union of data pointers. The appropriate type should be used for a typed + // tensor based on `type`. + TfLitePtrUnion data; + + // A pointer to a structure representing the dimensionality interpretation + // that the buffer should have. NOTE: the product of elements of `dims` + // and the element datatype size should be equal to `bytes` below. + TfLiteIntArray* dims; + + // The number of bytes required to store the data of this Tensor. I.e. + // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if + // type is kTfLiteFloat32 and dims = {3, 2} then + // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. + size_t bytes; + + // The data type specification for data stored in `data`. This affects + // what member of `data` union should be used. + TfLiteType type; + + // How memory is mapped + // kTfLiteMmapRo: Memory mapped read only. + // i.e. weights + // kTfLiteArenaRw: Arena allocated read write memory + // (i.e. temporaries, outputs). + TfLiteAllocationType allocation_type; + + // True if the tensor is a variable. + bool is_variable; +} TfLiteTensor; + +// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains +// only the minimum fields required to represent a node. +// +// This struct does not use: +// - delegate +// - intermediates +// - temporaries +typedef struct TfLiteNode { + // Inputs to this node expressed as indices into the simulator's tensors. + TfLiteIntArray* inputs; + + // Outputs to this node expressed as indices into the simulator's tensors. + TfLiteIntArray* outputs; + + // intermediate tensors to this node expressed as indices into the simulator's + // tensors. + TfLiteIntArray* intermediates; + + // Opaque data provided by the node implementer through `Registration.init`. + void* user_data; + + // Opaque data provided to the node if the node is a builtin. This is usually + // a structure defined in builtin_op_data.h + void* builtin_data; + + // Custom initial data. 
This is the opaque data provided in the flatbuffer. + // WARNING: This is an experimental interface that is subject to change. + const void* custom_initial_data; + int custom_initial_data_size; +} TfLiteNode; +#endif // TF_LITE_STATIC_MEMORY + +// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount +// of information required for a kernel to run during TfLiteRegistration::Eval. +// TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM +// builds with this flag by default internally. +typedef struct TfLiteEvalTensor { + // A union of data pointers. The appropriate type should be used for a typed + // tensor based on `type`. + TfLitePtrUnion data; + + // A pointer to a structure representing the dimensionality interpretation + // that the buffer should have. + TfLiteIntArray* dims; + + // The data type specification for data stored in `data`. This affects + // what member of `data` union should be used. + TfLiteType type; +} TfLiteEvalTensor; + +#ifndef TF_LITE_STATIC_MEMORY +// Free data memory of tensor `t`. +void TfLiteTensorDataFree(TfLiteTensor* t); + +// Free quantization data. +void TfLiteQuantizationFree(TfLiteQuantization* quantization); + +// Free sparsity parameters. +void TfLiteSparsityFree(TfLiteSparsity* sparsity); + +// Free memory of tensor `t`. +void TfLiteTensorFree(TfLiteTensor* t); + +// Set all of a tensor's fields (and free any previously allocated data). +void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, + TfLiteQuantizationParams quantization, char* buffer, + size_t size, TfLiteAllocationType allocation_type, + const void* allocation, bool is_variable, + TfLiteTensor* tensor); + +// Copies the contents of 'src' in 'dst'. +// Function does nothing if either 'src' or 'dst' is passed as nullptr and +// return kTfLiteOk. +// Returns kTfLiteError if 'src' and 'dst' doesn't have matching data size. 
+// Note function copies contents, so it won't create new data pointer
+// or change allocation type.
+// All Tensor related properties will be copied from 'src' to 'dst' like
+// quantization, sparsity, ...
+TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst);
+
+// Change the size of the memory block owned by `tensor` to `num_bytes`.
+// Tensors with allocation types other than `kTfLiteDynamic` will be ignored and
+// a kTfLiteOk will be returned.
+// `tensor`'s internal data buffer will be assigned a pointer
+// which can safely be passed to free or realloc if `num_bytes` is zero.
+// If `preserve_data` is true, tensor data will be unchanged in the range from
+// the start of the region up to the minimum of the old and new sizes. In the
+// case of NULL tensor, or an error allocating new memory, returns
+// `kTfLiteError`.
+TfLiteStatus TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor,
+                                         bool preserve_data);
+
+// Change the size of the memory block owned by `tensor` to `num_bytes`.
+// Tensors with allocation types other than kTfLiteDynamic will be ignored and
+// a kTfLiteOk will be returned.
+// `tensor`'s internal data buffer will be assigned a pointer
+// which can safely be passed to free or realloc if `num_bytes` is zero.
+// Tensor data will be unchanged in the range from the start of the region up to
+// the minimum of the old and new sizes. In the case
+// of NULL tensor, or an error allocating new memory, returns `kTfLiteError`.
+TfLiteStatus TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
+#endif  // TF_LITE_STATIC_MEMORY
+
+// WARNING: This is an experimental interface that is subject to change.
+//
+// Currently, TfLiteDelegateParams has to be allocated in a way that it's
+// trivially destructible. It will be stored as `builtin_data` field in
+// `TfLiteNode` of the delegate node.
+//
+// See also the `CreateDelegateParams` function in `interpreter.cc` for details.
+typedef struct TfLiteDelegateParams {
+  struct TfLiteDelegate* delegate;
+  TfLiteIntArray* nodes_to_replace;
+  TfLiteIntArray* input_tensors;
+  TfLiteIntArray* output_tensors;
+} TfLiteDelegateParams;
+
+// WARNING: This is an experimental interface that is subject to change.
+//
+// Currently, TfLiteOpaqueDelegateParams has to be allocated in a way that it's
+// trivially destructible. It will be stored as `builtin_data` field in
+// `TfLiteNode` of the delegate node.
+//
+// See also the `CreateOpaqueDelegateParams` function in `subgraph.cc` for
+// details.
+typedef struct TfLiteOpaqueDelegateParams {
+  TfLiteOpaqueDelegate* delegate;
+  void* delegate_data;
+  TfLiteIntArray* nodes_to_replace;
+  TfLiteIntArray* input_tensors;
+  TfLiteIntArray* output_tensors;
+} TfLiteOpaqueDelegateParams;
+
+typedef struct TfLiteContext {
+  // Number of tensors in the context.
+  size_t tensors_size;
+
+  // The execution plan contains a list of the node indices in execution
+  // order. execution_plan->size is the current number of nodes. And,
+  // execution_plan->data[0] is the first node that needs to be run.
+  // TfLiteDelegates can traverse the current execution plan by iterating
+  // through each member of this array and using GetNodeAndRegistration() to
+  // access details about a node. i.e.
+  //
+  // TfLiteIntArray* execution_plan;
+  // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
+  // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) {
+  //    int node_index = execution_plan->data[exec_index];
+  //    TfLiteNode* node;
+  //    TfLiteRegistration* reg;
+  //    context->GetNodeAndRegistration(context, node_index, &node, &reg);
+  // }
+  // Note: the memory pointed to by `*execution_plan` is OWNED by TfLite
+  // runtime. Future calls to GetExecutionPlan invalidate earlier outputs. The
+  // following code snippet shows the issue of such an invocation pattern.
+  // After calling CheckNode, subsequent access to `plan_1st` is undefined.
+  //
+  // void CheckNode(const TfLiteNode* node) {
+  //   ...
+  //   TfLiteIntArray* plan_2nd;
+  //   TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_2nd));
+  //   ...
+  // }
+  //
+  // TfLiteIntArray* plan_1st;
+  // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st));
+  // for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) {
+  //    int node_index = plan_1st->data[exec_index];
+  //    TfLiteNode* node;
+  //    TfLiteRegistration* reg;
+  //    context->GetNodeAndRegistration(context, node_index, &node, &reg);
+  //    CheckNode(node);
+  // }
+  //
+  // WARNING: This is an experimental interface that is subject to change.
+  TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context,
+                                   TfLiteIntArray** execution_plan);
+
+  // An array of tensors in the interpreter context (of length `tensors_size`)
+  TfLiteTensor* tensors;
+
+  // opaque full context ptr (an opaque c++ data structure)
+  void* impl_;
+
+  // Request memory pointer be resized. Updates dimensions on the tensor.
+  // NOTE: ResizeTensor takes ownership of newSize.
+  TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor,
+                               TfLiteIntArray* new_size);
+  // Request that an error be reported with format string msg.
+  void (*ReportError)(struct TfLiteContext*, const char* msg, ...);
+
+  // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If
+  // non-null, the value pointed to by `first_new_tensor_index` will be set to
+  // the index of the first new tensor.
+  TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add,
+                             int* first_new_tensor_index);
+
+  // Get a Tensor node by node_index.
+  // WARNING: This is an experimental interface that is subject to change.
+  TfLiteStatus (*GetNodeAndRegistration)(
+      struct TfLiteContext*, int node_index, TfLiteNode** node,
+      struct TfLiteRegistration** registration);
+
+  // Replace ops with one or more stub delegate operations. This function
+  // does not take ownership of `nodes_to_replace`.
+  TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)(
+      struct TfLiteContext*, struct TfLiteRegistration registration,
+      const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate);
+
+  // Number of threads that are recommended to subsystems like gemmlowp and
+  // eigen.
+  int recommended_num_threads;
+
+  // Access external contexts by type.
+  // WARNING: This is an experimental interface that is subject to change.
+  TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*,
+                                               TfLiteExternalContextType);
+  // Set the value of an external context. Does not take ownership of the
+  // pointer.
+  // WARNING: This is an experimental interface that is subject to change.
+  void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType,
+                             TfLiteExternalContext*);
+
+  // Flag for allowing float16 precision for FP32 calculation.
+  // default: false.
+  // WARNING: This is an experimental API and subject to change.
+  bool allow_fp32_relax_to_fp16;
+
+  // Pointer to the op-level profiler, if set; nullptr otherwise.
+  void* profiler;
+
+  // Allocate persistent buffer which has the same life time as the interpreter.
+  // Returns nullptr on failure.
+  // The memory is allocated from heap for TFL, and from tail in TFLM.
+  // This method is only available in Init or Prepare stage.
+  // WARNING: This is an experimental interface that is subject to change.
+  void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes);
+
+  // Allocate a buffer which will be deallocated right after invoke phase.
+  // The memory is allocated from heap in TFL, and from volatile arena in TFLM.
+  // This method is only available in invoke stage.
+  // NOTE: If possible use RequestScratchBufferInArena method to avoid memory
+  // allocation during inference time.
+  // WARNING: This is an experimental interface that is subject to change.
+  TfLiteStatus (*AllocateBufferForEval)(struct TfLiteContext* ctx, size_t bytes,
+                                        void** ptr);
+
+  // Request a scratch buffer in the arena through static memory planning.
+  // This method is only available in Prepare stage and the buffer is allocated
+  // by the interpreter between Prepare and Eval stage. In Eval stage,
+  // GetScratchBuffer API can be used to fetch the address.
+  // WARNING: This is an experimental interface that is subject to change.
+  TfLiteStatus (*RequestScratchBufferInArena)(struct TfLiteContext* ctx,
+                                              size_t bytes, int* buffer_idx);
+
+  // Get the scratch buffer pointer.
+  // This method is only available in Eval stage.
+  // WARNING: This is an experimental interface that is subject to change.
+  void* (*GetScratchBuffer)(struct TfLiteContext* ctx, int buffer_idx);
+
+  // Resize the memory pointer of the `tensor`. This method behaves the same as
+  // `ResizeTensor`, except that it makes a copy of the shape array internally
+  // so the shape array could be deallocated right afterwards.
+  // WARNING: This is an experimental interface that is subject to change.
+  TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx,
+                                       TfLiteTensor* tensor, int dims,
+                                       const int* shape);
+
+  // This method provides a preview of post-delegation partitioning. Each
+  // TfLiteDelegateParams in the referenced array corresponds to one instance of
+  // the delegate kernel.
+  // Example usage:
+  //
+  // TfLiteIntArray* nodes_to_replace = ...;
+  // TfLiteDelegateParams* params_array;
+  // int num_partitions = 0;
+  // TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
+  //    context, delegate, nodes_to_replace, &params_array, &num_partitions));
+  // for (int idx = 0; idx < num_partitions; idx++) {
+  //    const auto& partition_params = params_array[idx];
+  //    ...
+  // }
+  //
+  // NOTE: The context owns the memory referenced by partition_params_array. It
+  // will be cleared with another call to PreviewDelegatePartitioning, or after
+  // TfLiteDelegateParams::Prepare returns.
+  //
+  // WARNING: This is an experimental interface that is subject to change.
+  TfLiteStatus (*PreviewDelegatePartitioning)(
+      struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
+      TfLiteDelegateParams** partition_params_array, int* num_partitions);
+
+  // Returns a TfLiteTensor struct for a given index.
+  // WARNING: This is an experimental interface that is subject to change.
+  // WARNING: This method may not be available on all platforms.
+  TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context,
+                             int tensor_idx);
+
+  // Returns a TfLiteEvalTensor struct for a given index.
+  // WARNING: This is an experimental interface that is subject to change.
+  // WARNING: This method may not be available on all platforms.
+  TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context,
+                                     int tensor_idx);
+
+  // Retrieves named metadata buffer from the TFLite model.
+  // Returns kTfLiteOk if metadata is successfully obtained from the flatbuffer
+  // Model: that is, there exists a `metadata` entry with given `name` string.
+  // (see TFLite's schema.fbs).
+  // The corresponding `buffer` information is populated in `ptr` & `bytes`.
+  // The data from `ptr` is valid for the lifetime of the Interpreter.
+  //
+  // WARNING: This is an experimental interface that is subject to change.
+  TfLiteStatus (*GetModelMetadata)(const struct TfLiteContext* context,
+                                   const char* name, const char** ptr,
+                                   size_t* bytes);
+} TfLiteContext;
+
+// `TfLiteRegistrationExternal` is an external version of `TfLiteRegistration`
+// for C API which doesn't use internal types (such as `TfLiteContext`) but only
+// uses stable API types (such as `TfLiteOpaqueContext`). The purpose of each
+// field is exactly the same as with `TfLiteRegistration`.
+typedef struct TfLiteRegistrationExternal TfLiteRegistrationExternal;
+
+typedef struct TfLiteRegistration {
+  // Initializes the op from serialized data.
+  // Called only *once* for the lifetime of the op, so any one-time allocations
+  // should be made here (unless they depend on tensor sizes).
+  //
+  // If a built-in op:
+  //   `buffer` is the op's params data (TfLiteLSTMParams*).
+  //   `length` is zero.
+  // If custom op:
+  //   `buffer` is the op's `custom_options`.
+  //   `length` is the size of the buffer.
+  //
+  // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer
+  // or an instance of a struct).
+  //
+  // The returned pointer will be stored with the node in the `user_data` field,
+  // accessible within prepare and invoke functions below.
+  // NOTE: if the data is already in the desired format, simply implement this
+  // function to return `nullptr` and implement the free function to be a no-op.
+  void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
+
+  // The pointer `buffer` is the data previously returned by an init invocation.
+  void (*free)(TfLiteContext* context, void* buffer);
+
+  // prepare is called when the inputs this node depends on have been resized.
+  // context->ResizeTensor() can be called to request output tensors to be
+  // resized.
+  // Can be called multiple times for the lifetime of the op.
+  //
+  // Returns kTfLiteOk on success.
+  TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);
+
+  // Execute the node (should read node->inputs and output to node->outputs).
+  // Returns kTfLiteOk on success.
+  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);
+
+  // profiling_string is called during summarization of profiling information
+  // in order to group executions together. Providing a value here will cause a
+  // given op to appear multiple times in the profiling report. This is
+  // particularly useful for custom ops that can perform significantly
+  // different calculations depending on their `user-data`.
+  const char* (*profiling_string)(const TfLiteContext* context,
+                                  const TfLiteNode* node);
+
+  // Builtin codes. If this kernel refers to a builtin this is the code
+  // of the builtin. This is so we can do marshaling to other frameworks like
+  // NN API.
+  // Note: It is the responsibility of the registration binder to set this
+  // properly.
+  int32_t builtin_code;
+
+  // Custom op name. If the op is a builtin, this will be null.
+  // Note: It is the responsibility of the registration binder to set this
+  // properly.
+  // WARNING: This is an experimental interface that is subject to change.
+  const char* custom_name;
+
+  // The version of the op.
+  // Note: It is the responsibility of the registration binder to set this
+  // properly.
+  int version;
+
+  // The external version of `TfLiteRegistration`. Since we can't use internal
+  // types (such as `TfLiteContext`) for C API to maintain ABI stability.
+  // C API user will provide `TfLiteRegistrationExternal` to implement custom
+  // ops. We keep it inside of `TfLiteRegistration` and use it to route
+  // callbacks properly.
+  TfLiteRegistrationExternal* registration_external;
+
+  // Retrieves asynchronous kernel.
+  //
+  // If the `async_kernel` field is nullptr, it means the operation described by
+  // this TfLiteRegistration object does not support asynchronous execution.
+  // Otherwise, the function that the field points to should only be called for
+  // delegate kernel nodes, i.e. `node` should be a delegate kernel node created
+  // by applying a delegate.
+  // If the function returns nullptr, that means that the underlying delegate
+  // does not support asynchronous execution for this `node`.
+  struct TfLiteAsyncKernel* (*async_kernel)(TfLiteContext* context,
+                                            TfLiteNode* node);
+} TfLiteRegistration;
+
+/// \private
+// Old version of `TfLiteRegistration` to maintain binary backward
+// compatibility.
+// The legacy registration type must be a POD struct type whose field types must
+// be a prefix of the field types in TfLiteRegistration, and offset of the first
+// field in TfLiteRegistration that is not present in the legacy registration
+// type must be greater than or equal to the size of the legacy registration
+// type.
+// WARNING: This structure is deprecated / not an official part of the
+// API. It should be only used for binary backward compatibility.
+typedef struct TfLiteRegistration_V2 {
+  void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
+  void (*free)(TfLiteContext* context, void* buffer);
+  TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);
+  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);
+  const char* (*profiling_string)(const TfLiteContext* context,
+                                  const TfLiteNode* node);
+  int32_t builtin_code;
+  const char* custom_name;
+  int version;
+  TfLiteRegistrationExternal* registration_external;
+} TfLiteRegistration_V2;
+
+/// \private
+// Old version of `TfLiteRegistration` to maintain binary backward
+// compatibility.
+// The legacy registration type must be a POD struct type whose field types must
+// be a prefix of the field types in TfLiteRegistration, and offset of the first
+// field in TfLiteRegistration that is not present in the legacy registration
+// type must be greater than or equal to the size of the legacy registration
+// type.
+// WARNING: This structure is deprecated / not an official part of the
+// API. It should be only used for binary backward compatibility.
+typedef struct TfLiteRegistration_V1 {
+  void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
+  void (*free)(TfLiteContext* context, void* buffer);
+  TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);
+  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);
+  const char* (*profiling_string)(const TfLiteContext* context,
+                                  const TfLiteNode* node);
+  int32_t builtin_code;
+  const char* custom_name;
+  int version;
+} TfLiteRegistration_V1;
+
+// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the
+// values should be 1, 2, 4, 8, ...etc.
+typedef enum TfLiteDelegateFlags {
+  kTfLiteDelegateFlagsNone = 0,
+  // The flag is set if the delegate can handle dynamic sized tensors.
+  // For example, the output shape of a `Resize` op with non-constant shape
+  // can only be inferred when the op is invoked.
+  // In this case, the Delegate is responsible for calling
+  // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling
+  // `ResizeTensor` when invoking the op.
+  //
+  // If the delegate isn't capable of handling dynamic tensors, this flag needs
+  // to be set to false.
+  kTfLiteDelegateFlagsAllowDynamicTensors = 1,
+
+  // This flag can be used by delegates (that allow dynamic tensors) to ensure
+  // applicable tensor shapes are automatically propagated in the case of tensor
+  // resizing.
+  // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors
+  // of a delegate kernel will have correct shapes before its Prepare() method
+  // is called. The runtime leverages TFLite builtin ops in the original
+  // execution plan to propagate shapes.
+  //
+  // A few points to note:
+  // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is
+  // false, this one is redundant since the delegate kernels are re-initialized
+  // every time tensors are resized.
+  // 2. Enabling this flag adds some overhead to AllocateTensors(), since extra
+  // work is required to prepare the original execution plan.
+  // 3. This flag requires that the original execution plan only have ops with
+  // valid registrations (and not 'dummy' custom ops like with Flex).
+  // WARNING: This feature is experimental and subject to change.
+  kTfLiteDelegateFlagsRequirePropagatedShapes = 2,
+
+  // This flag can be used by delegates to request per-operator profiling. If a
+  // node is a delegate node, this flag will be checked before profiling. If
+  // set, then the node will not be profiled. The delegate will then add per
+  // operator information using Profiler::EventType::OPERATOR_INVOKE_EVENT and
+  // the results will appear in the operator-wise Profiling section and not in
+  // the Delegate internal section.
+  kTfLiteDelegateFlagsPerOperatorProfiling = 4
+} TfLiteDelegateFlags;
+
+// WARNING: This is an experimental interface that is subject to change.
+typedef struct TfLiteDelegate {
+  // Data that delegate needs to identify itself. This data is owned by the
+  // delegate. The delegate is owned in the user code, so the delegate is
+  // responsible for deallocating this when it is destroyed.
+  void* data_;
+
+  // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the
+  // delegate a view of the current graph through TfLiteContext*. It typically
+  // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels()
+  // to ask the TensorFlow lite runtime to create macro-nodes to represent
+  // delegated subgraphs of the original graph.
+  TfLiteStatus (*Prepare)(TfLiteContext* context,
+                          struct TfLiteDelegate* delegate);
+
+  // Copy the data from delegate buffer handle into raw memory of the given
+  // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as
+  // long as it follows the rules for kTfLiteDynamic tensors, in which case this
+  // cannot be null.
+  TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context,
+                                       struct TfLiteDelegate* delegate,
+                                       TfLiteBufferHandle buffer_handle,
+                                       TfLiteTensor* tensor);
+
+  // Copy the data from raw memory of the given 'tensor' to delegate buffer
+  // handle. This can be null if the delegate doesn't use its own buffer.
+  TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context,
+                                     struct TfLiteDelegate* delegate,
+                                     TfLiteBufferHandle buffer_handle,
+                                     TfLiteTensor* tensor);
+
+  // Free the Delegate Buffer Handle. Note: This only frees the handle, but
+  // this doesn't release the underlying resource (e.g. textures). The
+  // resources are either owned by application layer or the delegate.
+  // This can be null if the delegate doesn't use its own buffer.
+  void (*FreeBufferHandle)(TfLiteContext* context,
+                           struct TfLiteDelegate* delegate,
+                           TfLiteBufferHandle* handle);
+
+  // Bitmask flags. See the comments in `TfLiteDelegateFlags`.
+  int64_t flags;
+
+  // The opaque delegate builder associated with this object. If set then the
+  // TF Lite runtime will give precedence to this field. E.g. instead of
+  // invoking 'Prepare' via the function pointer inside the 'TfLiteDelegate'
+  // object, the runtime will first check if the corresponding function
+  // pointer inside 'opaque_delegate_builder' is set and if so invoke that.
+  //
+  // If this field is non-null, then the 'Prepare' field (of the
+  // 'TfLiteDelegate') should be null.
+  struct TfLiteOpaqueDelegateBuilder* opaque_delegate_builder;
+} TfLiteDelegate;
+
+// Build a 'null' delegate, with all the fields properly set to their default
+// values.
+TfLiteDelegate TfLiteDelegateCreate(void);
+
+// `TfLiteOpaqueDelegateBuilder` is used for constructing
+// `TfLiteOpaqueDelegate`, see `TfLiteOpaqueDelegateCreate` below. Note:
+// This struct is not ABI stable.
+//
+// For forward source compatibility `TfLiteOpaqueDelegateBuilder` objects should
+// be brace-initialized, so that all fields (including any that might be added
+// in the future) get zero-initialized. The purpose of each field is exactly
+// the same as with `TfLiteDelegate`.
+//
+// WARNING: This is an experimental interface that is subject to change.
+typedef struct TfLiteOpaqueDelegateBuilder {
+  // Data that delegate needs to identify itself. This data is owned by the
+  // delegate. The delegate is owned in the user code, so the delegate is
+  // responsible for deallocating this when it is destroyed.
+  void* data;
+  // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the
+  // delegate a view of the current graph through TfLiteContext*. It typically
+  // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels()
+  // to ask the TensorFlow lite runtime to create macro-nodes to represent
+  // delegated subgraphs of the original graph.
+  TfLiteStatus (*Prepare)(TfLiteOpaqueContext* context,  // NOLINT
+                          TfLiteOpaqueDelegate* delegate, void* data);
+  // Copies the data from delegate buffer handle into raw memory of the given
+  // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as
+  // long as it follows the rules for kTfLiteDynamic tensors, in which case this
+  // cannot be null.
+  TfLiteStatus (*CopyFromBufferHandle)(  // NOLINT
+      TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate, void* data,
+      TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor);
+  // Copies the data from raw memory of the given 'tensor' to delegate buffer
+  // handle. This can be null if the delegate doesn't use its own buffer.
+  TfLiteStatus (*CopyToBufferHandle)(  // NOLINT
+      TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate, void* data,
+      TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor);
+  // Frees the Delegate Buffer Handle. Note: This only frees the handle, but
+  // this doesn't release the underlying resource (e.g. textures). The
+  // resources are either owned by application layer or the delegate.
+  // This can be null if the delegate doesn't use its own buffer.
+  void (*FreeBufferHandle)(TfLiteOpaqueContext* context,  // NOLINT
+                           TfLiteOpaqueDelegate* delegate, void* data,
+                           TfLiteBufferHandle* handle);
+  // Bitmask flags. See the comments in `TfLiteDelegateFlags`.
+  int64_t flags;
+} TfLiteOpaqueDelegateBuilder;
+
+// Creates an opaque delegate and returns its address. The opaque delegate will
+// behave according to the provided 'opaque_delegate_builder'. The lifetime of
+// the objects pointed to by any of the fields within the
+// 'opaque_delegate_builder' must outlive the returned
+// 'TfLiteOpaqueDelegate' and any 'TfLiteInterpreter',
+// 'TfLiteInterpreterOptions', 'tflite::Interpreter', or
+// 'tflite::InterpreterBuilder' that the delegate is added to. The returned
+// address should be passed to 'TfLiteOpaqueDelegateDelete' for deletion. If
+// 'opaque_delegate_builder' is a null pointer, then a null pointer will be
+// returned.
+TfLiteOpaqueDelegate* TfLiteOpaqueDelegateCreate(
+    const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder);
+
+// Deletes the provided opaque 'delegate'. This function has no effect if the
+// 'delegate' is a null pointer.
+void TfLiteOpaqueDelegateDelete(TfLiteOpaqueDelegate* delegate);
+
+// Returns a pointer to the data associated with the provided opaque 'delegate'.
+//
+// A null pointer will be returned when:
+// - The 'delegate' is null.
+// - The 'data' field of the 'TfLiteOpaqueDelegateBuilder' used to construct the
+//   'delegate' was null.
+// - Or in case of any other error.
+// - The 'delegate' has been constructed via a 'TfLiteOpaqueDelegateBuilder',
+//   but the 'data' field of the 'TfLiteOpaqueDelegateBuilder' is null.
+//
+// The data_ field of 'delegate' will be returned if the
+// 'opaque_delegate_builder' field is null.
+void* TfLiteOpaqueDelegateGetData(const TfLiteOpaqueDelegate* delegate);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif  // __cplusplus
+#endif  // TENSORFLOW_LITE_CORE_C_COMMON_H_