- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 123 for input_dtype (0.29 sec)
-
tensorflow/c/kernels_experimental.cc
tensorflow::Var** maybe_resource) { auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx); *maybe_resource = nullptr; if (cc_ctx->input_dtype(input) == tensorflow::DT_RESOURCE) { if (LookupResource(cc_ctx, HandleFromInput(cc_ctx, input), maybe_resource) .ok()) { return (*maybe_resource)->mu(); } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 23 06:12:29 UTC 2024 - 30.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc
absl::StatusOr<ElementsAttr> ConvertTensor(const Tensor& input_tensor, Builder* builder) { const auto& input_dtype = input_tensor.dtype(); const auto& input_shape = input_tensor.shape(); Type elt_type; TF_RETURN_IF_ERROR(ConvertDataType(input_dtype, *builder, &elt_type)); SmallVector<int64_t, 4> shape; ConvertToMlirShape(input_shape, &shape);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 20.5K bytes - Viewed (0) -
tensorflow/c/kernels.cc
auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx); CHECK_GE(index, 0); // Crash OK CHECK_LT(index, cc_ctx->num_inputs()); // Crash OK return static_cast<TF_DataType>(cc_ctx->input_dtype(index)); } void TF_SetOutput(TF_OpKernelContext* ctx, int i, const TF_Tensor* tensor, TF_Status* status) { auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 22:53:47 UTC 2024 - 36K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/perception_ops_utils_test.cc
auto input_type = RankedTensorType::get({1, 2, 2, 1}, builder_->getF32Type()); auto output_type = RankedTensorType::get({1, 2, 1, 1}, builder_->getF32Type()); SmallVector<mlir::Type, 1> input_types{input_type}; SmallVector<mlir::Type, 1> output_types{output_type}; auto max_unpooling_func = createMaxUnpoolingFunc<1, 1>(builder_.get(), input_types, output_types);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Sep 29 21:02:21 UTC 2022 - 7.4K bytes - Viewed (0) -
tensorflow/cc/gradients/functional_grad.cc
} std::vector<Output> func_inputs; std::vector<DataType> input_dtypes; const int num_inputs = op.num_inputs(); func_inputs.reserve(num_inputs + grad_inputs.size()); input_dtypes.reserve(num_inputs); for (int i = 0; i < num_inputs; i++) { func_inputs.push_back(op.input(i)); input_dtypes.push_back(op.input_type(i)); } func_inputs.insert(std::end(func_inputs), std::begin(grad_inputs),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Oct 15 20:09:06 UTC 2021 - 2.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h
// process. struct ExpressedToQuantizedConverter { // Creates a converter for the given input type. static ExpressedToQuantizedConverter forInputType(Type input_type); // Converts the inputType to be based on the given elemental type, // returning the new type (or nullptr and emit an error on failure). Type convert(quant::QuantizedType elemental_type) const; // Whether the conversion is legal.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
mlir::OpBuilder builder(func.getBody()); auto& bb = func.front(); auto loc = func.getLoc(); int num_args = bb.getNumArguments(); llvm::SmallVector<Type, 4> input_types; input_types.reserve(num_args); // Edit the block arguments and create the new input ops in place to replace // the old input ops and quantize ops. for (int i = 0; i != num_args; ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/ir/tfr_ops.cc
!result_type.isa<IntegerType>()) { return nullptr; } auto input_itype = input.getType().cast<IntegerType>(); auto result_itype = result_type.cast<IntegerType>(); if (input_itype.getWidth() == result_itype.getWidth()) return nullptr; if (input_itype.getWidth() > result_itype.getWidth()) { return builder.create<arith::TruncIOp>(conversion_loc, result_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Nov 21 16:55:41 UTC 2023 - 38.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc
auto output_type = RankedTensorType::get(output_shape, builder->getF32Type()); SmallVector<mlir::Type, 4> input_types{input_type, weight_type, bias_type, projection_type, layer_norm_scale_type}; auto func_type = builder->getFunctionType(input_types, output_type); auto func = func::FuncOp::create(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils.cc
Operation** result) { auto input_type = mlir::cast<RankedTensorType>(input.getType()); SmallVector<int64_t, 4> output_shape; int size_of_splits; if (input_type.getRank() < axis || axis < 0) return failure(); for (int i = 0; i < input_type.getRank(); ++i) { int64_t dim = input_type.getDimSize(i); if (i == axis) { if (dim % splits != 0) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 36.2K bytes - Viewed (0)