/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.neuralnetworks@1.1;

import @1.0::Operand;
import @1.0::OperationType;
import @1.0::PerformanceInfo;

/**
 * Operation types.
 *
 * The type of an operation in a model.
 */
enum OperationType : @1.0::OperationType {
    /**
     * BatchToSpace for N-dimensional tensors.
     *
     * This operation reshapes the batch dimension (dimension 0) into M + 1
     * dimensions of shape block_shape + [batch], interleaves these blocks
     * back into the grid defined by the spatial dimensions [1, ..., M], to
     * obtain a result with the same rank as the input.
     *
     * This is the reverse of SpaceToBatch.
     *
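     * For example, with block sizes [2, 2] a batch of 4 is redistributed
     * back into a 2x2 spatial grid (illustrative shapes):
     *     input.dimension  = {4, 1, 1, 3}
     *     block            = [2, 2]
     *     output.dimension = {1, 2, 2, 3}
     *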
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor rank: 4
     *
     * Inputs:
     * * 0: An n-D tensor, specifying the tensor to be reshaped.
     * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
     *      sizes for each spatial dimension of the input tensor. All values
     *      must be >= 1.
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 28.
     */
    BATCH_TO_SPACE_ND = 29,

    /**
     * Element-wise division of two tensors.
     *
     * Takes two input tensors of identical {@link OperandType} and
     * compatible dimensions. The output is the result of dividing the first
     * input tensor by the second, optionally modified by an activation
     * function.
     *
     * Two dimensions are compatible when:
     * 1. they are equal, or
     * 2. one of them is 1
     *
     * The size of the output is the maximum size along each dimension of the
     * input operands. It starts with the trailing dimensions, and works its
     * way forward.
     *
     * Example:
     *     input1.dimension = {4, 1, 2}
     *     input2.dimension = {5, 4, 3, 1}
     *     output.dimension = {5, 4, 3, 2}
     *
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * * 0: An n-D tensor, specifying the first input.
     * * 1: A tensor of the same {@link OperandType}, and compatible
     *      dimensions as input0.
     * * 2: An {@link OperandType::INT32} scalar specifying the activation to
     *      invoke on the result; must be one of the
     *      {@link FusedActivationFunc} values.
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 28.
     */
    DIV = 30,

    /**
     * Computes the mean of elements across dimensions of a tensor.
     *
     * Reduces the input tensor along the dimensions given in axis. Unless
     * keep_dims is true, the rank of the tensor is reduced by 1 for each
     * entry in axis. If keep_dims is true, the reduced dimensions are
     * retained with length 1.
     *
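     * For example, averaging over the last axis with and without keep_dims
     * (illustrative shapes):
     *     input.dimension  = {4, 2}, axis = [1]
     *     keep_dims = 0  ->  output.dimension = {4}
     *     keep_dims > 0  ->  output.dimension = {4, 1}
     *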
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * * 0: A tensor, specifying the input.
     * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}. The dimensions
     *      to reduce. Must be in the range
     *      [-rank(input_tensor), rank(input_tensor)).
     *
     *      NOTE: When the operation was introduced, the documentation
     *      incorrectly stated that if dimensions were empty, the operation
     *      would reduce across all dimensions. This behavior was never
     *      implemented.
     *
     * * 2: An {@link OperandType::INT32} scalar, keep_dims. If positive,
     *      retains reduced dimensions with length 1.
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 28.
     */
    MEAN = 31,

    /**
     * Pads a tensor.
     *
     * This operation pads a tensor according to the specified paddings.
     *
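     * For example, padding one element on each side of the second dimension
     * and two on each side of the third (illustrative shapes):
     *     input.dimension  = {1, 2, 3}
     *     paddings         = [[0, 0], [1, 1], [2, 2]]
     *     output.dimension = {1, 4, 7}
     *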
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM} (the pad value is undefined)
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * * 0: An n-D tensor, specifying the tensor to be padded.
     * * 1: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
     *      for each spatial dimension of the input tensor. The shape of the
     *      tensor must be {rank(input0), 2}.
     *      padding[i, 0] specifies the number of elements to be padded at
     *      the front of dimension i.
     *      padding[i, 1] specifies the number of elements to be padded after
     *      the end of dimension i.
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0. The
     *      output tensor has the same rank as input0, and each
     *      dimension of the output tensor has the same size as the
     *      corresponding dimension of the input tensor plus the size
     *      of the padding:
     *          output0.dimension[i] =
     *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
     *
     *      NOTE: The pad value for {@link OperandType::TENSOR_QUANT8_ASYMM}
     *      is undefined.
     *
     * Available since API level 28.
     */
    PAD = 32,

    /**
     * SpaceToBatch for N-dimensional tensors.
     *
     * This operation divides "spatial" dimensions [1, ..., M] of the input
     * into a grid of blocks of shape block_shape, and interleaves these
     * blocks with the "batch" dimension (0) such that in the output, the
     * spatial dimensions [1, ..., M] correspond to the position within the
     * grid, and the batch dimension combines both the position within a
     * spatial block and the original batch position. Prior to division into
     * blocks, the spatial dimensions of the input are optionally zero padded
     * according to paddings.
     *
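     * For example, with block sizes [2, 2] and no padding, a 2x2 spatial
     * grid is folded into the batch dimension (illustrative shapes):
     *     input.dimension  = {1, 2, 2, 3}
     *     block            = [2, 2]
     *     paddings         = [[0, 0], [0, 0]]
     *     output.dimension = {4, 1, 1, 3}
     *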
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor rank: 4
     *
     * Inputs:
     * * 0: An n-D tensor, specifying the input.
     * * 1: A 1-D Tensor of {@link OperandType::TENSOR_INT32}, the block
     *      sizes for each spatial dimension of the input tensor. All values
     *      must be >= 1.
     * * 2: A 2-D Tensor of {@link OperandType::TENSOR_INT32}, the paddings
     *      for each spatial dimension of the input tensor. All values must
     *      be >= 0. The shape of the tensor must be {M, 2}, where M is the
     *      number of spatial dimensions.
     *      padding[i, 0] specifies the number of elements to be padded at
     *      the front of dimension i.
     *      padding[i, 1] specifies the number of elements to be padded after
     *      the end of dimension i.
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 28.
     */
    SPACE_TO_BATCH_ND = 33,

    /**
     * Removes dimensions of size 1 from the shape of a tensor.
     *
     * Given a tensor input, this operation returns a tensor of the same
     * {@link OperandType} with all dimensions of size 1 removed. If you
     * don't want to remove all size 1 dimensions, you can remove specific
     * size 1 dimensions by specifying the axes (input1).
     *
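     * For example, squeezing only axis 2 versus squeezing every size-1
     * dimension (illustrative shapes):
     *     input.dimension  = {4, 1, 1, 2}
     *     axes = [2]    ->  output.dimension = {4, 1, 2}
     *     axes omitted  ->  output.dimension = {4, 2}
     *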
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * * 0: An n-D tensor, the tensor to be squeezed.
     * * 1: An optional 1-D tensor of {@link OperandType::TENSOR_INT32}. The
     *      dimensions to squeeze. If specified, only squeezes the dimensions
     *      listed. Otherwise, squeezes all dimensions. The dimension index
     *      starts at 0. An error must be reported if squeezing a dimension
     *      that is not 1.
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0. Contains the
     *      same data as input, but has one or more dimensions of size 1
     *      removed.
     *
     * Available since API level 28.
     */
    SQUEEZE = 34,

    /**
     * Extracts a strided slice of a tensor.
     *
     * Roughly speaking, this op extracts a slice of size (end - begin) /
     * stride from the given input tensor. Starting at the location specified
     * by begin, the slice continues by adding stride to the index until all
     * dimensions are not less than end. Note that a stride can be negative,
     * which causes a reverse slice.
     *
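     * For example, slicing a 1-D tensor with a stride of 2 and all masks set
     * to 0 (illustrative values):
     *     input   = [1, 2, 3, 4, 5, 6]
     *     begin   = [1], end = [5], strides = [2]
     *     begin_mask = end_mask = shrink_axis_mask = 0
     *     output  = [2, 4]
     *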
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * * 0: An n-D tensor, specifying the tensor to be sliced.
     * * 1: begin, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
     *      starts of the dimensions of the input tensor to be sliced. The
     *      length must be rank(input0).
     * * 2: end, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
     *      ends of the dimensions of the input tensor to be sliced. The
     *      length must be rank(input0).
     * * 3: strides, a 1-D tensor of {@link OperandType::TENSOR_INT32}. The
     *      strides of the dimensions of the input tensor to be sliced. The
     *      length must be rank(input0). The entries must be non-zero.
     * * 4: begin_mask, an {@link OperandType::INT32} scalar. If the ith bit
     *      of begin_mask is set, begin[i] is ignored and the fullest
     *      possible range in that dimension is used instead.
     * * 5: end_mask, an {@link OperandType::INT32} scalar. If the ith bit of
     *      end_mask is set, end[i] is ignored and the fullest possible range
     *      in that dimension is used instead.
     * * 6: shrink_axis_mask, an {@link OperandType::INT32} scalar. If the
     *      ith bit of shrink_axis_mask is set, the ith dimension
     *      specification shrinks the dimensionality by 1, taking on the
     *      value at index begin[i]. In this case, the ith specification must
     *      define a slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0 and rank
     *      (n - k), where k is the number of bits set in shrink_axis_mask.
     *
     * Available since API level 28.
     */
    STRIDED_SLICE = 35,

    /**
     * Element-wise subtraction of two tensors.
     *
     * Takes two input tensors of identical {@link OperandType} and
     * compatible dimensions. The output is the result of subtracting the
     * second input tensor from the first one, optionally modified by an
     * activation function.
     *
     * Two dimensions are compatible when:
     * 1. they are equal, or
     * 2. one of them is 1
     *
     * The size of the output is the maximum size along each dimension of the
     * input operands. It starts with the trailing dimensions, and works its
     * way forward.
     *
     * Example:
     *     input1.dimension = {4, 1, 2}
     *     input2.dimension = {5, 4, 3, 1}
     *     output.dimension = {5, 4, 3, 2}
     *
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * * 0: An n-D tensor, specifying the first input.
     * * 1: A tensor of the same {@link OperandType}, and compatible
     *      dimensions as input0.
     * * 2: An {@link OperandType::INT32} scalar specifying the activation to
     *      invoke on the result; must be one of the
     *      {@link FusedActivationFunc} values.
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 28.
     */
    SUB = 36,

    /**
     * Transposes the input tensor, permuting the dimensions according to the
     * perm tensor.
     *
     * The returned tensor's dimension i corresponds to the input dimension
     * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
     * rank of the input tensor. Hence by default, this operation performs a
     * regular matrix transpose on 2-D input Tensors.
     *
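     * For example, swapping the two axes of a matrix (illustrative shapes):
     *     input.dimension  = {2, 3}
     *     perm             = [1, 0]
     *     output.dimension = {3, 2}
     *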
     * Supported tensor {@link OperandType}:
     * * {@link OperandType::TENSOR_FLOAT32}
     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
     *
     * Supported tensor rank: up to 4
     *
     * Inputs:
     * * 0: An n-D tensor, specifying the tensor to be transposed.
     * * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32},
     *      the permutation of the dimensions of the input tensor.
     *
     * Outputs:
     * * 0: A tensor of the same {@link OperandType} as input0.
     *
     * Available since API level 28.
     */
    TRANSPOSE = 37,
};

/**
 * The capabilities of a driver.
 */
struct Capabilities {
    /**
     * Driver performance when operating on float32 data.
     */
    PerformanceInfo float32Performance;

    /**
     * Driver performance when operating on asymmetric 8-bit quantized data.
     */
    PerformanceInfo quantized8Performance;

    /**
     * Driver performance when operating on float32 data but performing
     * calculations with range and/or precision as low as that of the IEEE
     * 754 16-bit floating-point format.
     */
    PerformanceInfo relaxedFloat32toFloat16Performance;
};

/**
 * Describes one operation of the model's graph.
 */
struct Operation {
    /**
     * The operation type.
     */
    OperationType type;

    /**
     * Describes the table that contains the indexes of the inputs of the
     * operation. Each value is an index into the model's operands vector.
     */
    vec<uint32_t> inputs;

    /**
     * Describes the table that contains the indexes of the outputs of the
     * operation. Each value is an index into the model's operands vector.
     */
    vec<uint32_t> outputs;
};

/**
 * A Neural Network Model.
 *
 * This includes not only the execution graph, but also constant data such as
 * weights or scalars added at construction time. The only information that
 * may not be known is the shape of the input tensors.
 */
struct Model {
    /**
     * All operands included in the model.
     */
    vec<Operand> operands;

    /**
     * All operations included in the model.
     *
     * The operations are sorted into execution order. Every operand
     * with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be
     * written before it is read.
     */
    vec<Operation> operations;

    /**
     * Input indexes of the model. There must be at least one.
     *
     * Each value corresponds to the index of the operand in "operands".
     */
    vec<uint32_t> inputIndexes;

    /**
     * Output indexes of the model. There must be at least one.
     *
     * Each value corresponds to the index of the operand in "operands".
     */
    vec<uint32_t> outputIndexes;

    /**
     * A byte buffer containing operand data that were copied into the model.
     *
     * An operand's value must be located here if and only if Operand::lifetime
     * equals OperandLifeTime::CONSTANT_COPY.
     */
    vec<uint8_t> operandValues;

    /**
     * A collection of shared memory pools containing operand values.
     *
     * An operand's value must be located here if and only if Operand::lifetime
     * equals OperandLifeTime::CONSTANT_REFERENCE.
     */
    vec<memory> pools;

    /**
     * 'true' indicates TENSOR_FLOAT32 may be calculated with range and/or
     * precision as low as that of the IEEE 754 16-bit floating-point format.
     * 'false' indicates TENSOR_FLOAT32 must be calculated using at least the
     * range and precision of the IEEE 754 32-bit floating-point format.
     */
    bool relaxComputationFloat32toFloat16;
};

/**
 * Execution preferences.
 */
enum ExecutionPreference : int32_t {
    /**
     * Prefer executing in a way that minimizes battery drain.
     * This is desirable for compilations that will be executed often.
     */
    LOW_POWER = 0,

    /**
     * Prefer returning a single answer as fast as possible, even if this
     * causes more power consumption.
     */
    FAST_SINGLE_ANSWER = 1,

    /**
     * Prefer maximizing the throughput of successive frames, for example when
     * processing successive frames coming from the camera.
     */
    SUSTAINED_SPEED = 2,
};