- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 118 for output_types (0.19 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
} bool changed = false; int next_op_result = 0; for (auto output_type : main_output_types) { if (tensorflow::IsTokenType(output_type)) continue; auto output_type_ranked = mlir::dyn_cast<RankedTensorType>(output_type); if (output_type_ranked == nullptr) { llvm::errs() << "Unsupported XlaCallModule result type: " << output_type << "\n"; return false; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 07:28:49 UTC 2024 - 134.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/arithmetic_count_util.h
auto output = op->getResult(0); auto output_type = mlir::dyn_cast_or_null<mlir::RankedTensorType>(output.getType()); if (output_type == nullptr || !output_type.hasStaticShape()) return false; int64_t cols = 1; for (int i = 0; i < output_type.getRank() - 1; ++i) { cols *= output_type.getDimSize(i); } const int64_t cost_per_col = 2 * weight_type.getNumElements();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc
auto output = op->getResult(0); auto output_type = mlir::dyn_cast_or_null<RankedTensorType>(output.getType()); if (!output_type) return failure(); // bias should be a vector sized of the last output dim. int64_t num_units = output_type.getDimSize(output_type.getRank() - 1); auto bias_type = mlir::RankedTensorType::get({num_units}, output_type.getElementType()); mlir::DenseElementsAttr bias_attr;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 25.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir
{"quantized_ops": ["${main_op}", "BiasAdd", "Relu"], "act_func": "internal_dequantize_and_relu_fn", "output_type": "f32"}, {"quantized_ops": ["${main_op}", "BiasAdd", "Relu6"], "act_func": "internal_dequantize_and_relu6_fn", "output_type": "f32"}, ] func.func @GenerateQuantizedFunctionName(${quantized_ops}, "${output_type}")(%input : tensor<*xi8>, %filter : tensor<*xi8>, %bias : tensor<*xi32>,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 30.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc
op->getLoc(), *output_type, op.getInput()); rewriter.replaceOpWithNewOp<mhlo::BitcastConvertOp>( op, output_type->clone( mlir::dyn_cast<quant::QuantizedType>(output_type->getElementType()) .getStorageType()), result); return success(); } }; // UniformDequantizeOp takes TF quantized types as input which would have been
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 30.9K bytes - Viewed (0) -
tensorflow/c/kernels/ops/bitcast.cc
TF_DataType output_type, TF_Status* status) { size_t input_type_size = TF_DataTypeSize(input_type); size_t output_type_size = TF_DataTypeSize(output_type); if (input_type_size == 0 || output_type_size == 0) { std::ostringstream err; err << "Cannot bitcast type " << input_type << " to " << output_type << " because one of the type sizes is zero";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 07:51:50 UTC 2024 - 5.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc
public: MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ModifyIONodesPass) explicit ModifyIONodesPass() {} explicit ModifyIONodesPass(mlir::Type input_type, mlir::Type output_type) { this->input_type = input_type; this->output_type = output_type; } void runOnOperation() override; private: // Assign the io types from the command line flag. This is only required for // tests.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc
auto input_type = mlir::dyn_cast<RankedTensorType>(dequantize_op.getOutput().getType()); auto output_type = mlir::dyn_cast<RankedTensorType>( passthrough_op->getResult(0).getType()); if (!input_type || !output_type || get_num_elements(input_type) <= get_num_elements(output_type)) { return failure(); } Type input_element_type = getElementTypeOrSelf(dequantize_op.getInput());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.1K bytes - Viewed (0) -
platforms/software/reporting/src/main/java/org/gradle/api/reporting/internal/SimpleReport.java
private final String name; private final Describable displayName; private final OutputType outputType; public SimpleReport(String name, Describable displayName, OutputType outputType) { this.name = name; this.displayName = displayName; this.outputType = outputType; } @Override public String getName() { return name; } @Override
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Fri Feb 09 13:05:08 UTC 2024 - 2.3K bytes - Viewed (0) -
platforms/core-runtime/stdlib-java-extensions/src/main/java/org/gradle/internal/Cast.java
* * @param outputType The type to cast the input to * @param object The object to be cast (must not be {@code null}) * @param <O> The type to be cast to * @param <I> The type of the object to be cast * @return The input object, cast to the output type */ public static <O, I> O cast(Class<O> outputType, I object) { try {
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon Jun 10 14:28:48 UTC 2024 - 3K bytes - Viewed (0)