- Sort: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 89 for output_types (0.31 sec)
-
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
} // Collect all the quantized outputs and replace them by the results of // the new quantized op. llvm::SmallDenseMap<Value, int> outputs_replaced; SmallVector<Type, 4> output_types; output_types.reserve(quantizing_op->getNumResults()); for (const auto& enumerated_result : llvm::enumerate(quantizing_op->getResults())) { Value result = enumerated_result.value();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc
SmallVector<int64_t, 3> slice_size = {1, num_rows, num_cols}; Type slice_result_type = RankedTensorType::get(slice_size, element_type); llvm::SmallVector<Type, 4> output_types(batch_size, slice_result_type); auto split_op = rewriter.create<TF::SplitOp>(loc, output_types, split_dimension_op.getOutput(), reshape_op.getOutput());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
"max_percentile", rewriter.getF32FloatAttr( calib_opts_.calibration_parameters().max_percentile())), }; SmallVector<Type, 4> output_types{ value.getType(), RankedTensorType::get({}, rewriter.getF32Type()), RankedTensorType::get({}, rewriter.getF32Type()),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/decompose_optionals.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/tests/mlrt/tf_to_mlrt.mlir
%2 = "tf.Const"() {__op_key = 2: i32, device = "/device:CPU:0", value = dense<1> : tensor<i64>} : () -> tensor<i64> %3 = "tf.RangeDataset"(%0, %1, %2) {__op_key = 3: i32, device = "/device:CPU:0", output_shapes = [#tf_type.shape<>], output_types = [i64], metadata = ""} : (tensor<i64>, tensor<i64>, tensor<i64>) -> tensor<!tf_type.variant> // CHECK: tf_mlrt.executeop{{.*}}op: \22FlatMapDataset\22 // CHECK-SAME: \22__inference_Dataset_flat_map_lambda_19\22
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 20:44:15 UTC 2024 - 24.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc
for (Value v : inputs) operand_types.emplace_back(v.getType()); llvm::SmallVector<Type, 4> output_types; output_types.reserve(outputs.size()); for (Value v : outputs) output_types.emplace_back(v.getType()); auto func_type = builder->getFunctionType(operand_types, output_types); FuncOp outlined_func = FuncOp::create(ops.front()->getLoc(), kHostFunctionAttr, func_type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 68.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
} output_types = {new_output_type}; } else { output_types = {output_type.clone(elem_type.getStorageType())}; } SmallVector<Value> args = {q_op.getArg(), scale, zero_point}; FlatSymbolRefAttr func_name = FlatSymbolRefAttr::get(rewriter.getStringAttr(kQuantizeFuncName)); auto quantize_call = rewriter.create<TF::PartitionedCallOp>( loc, output_types, args, func_name,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tfg-to-tfe.cc
mlir::ArrayAttr array_attr, llvm::SmallVector<mlir::Type> &output_types, llvm::SmallVector<mlir::DictionaryAttr> &output_attrs) { for (auto it : llvm::zip( types, array_attr.template getAsRange<mlir::DictionaryAttr>())) { if (mlir::isa<tfg::ControlType>(std::get<0>(it))) continue; output_types.push_back(std::get<0>(it)); mlir::NamedAttrList list;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 21.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_validate_inputs.cc
bool IsMustNotBeXlaOp(Operation* op) { for (auto& input : op->getOpOperands()) { if (TypeMustBeNonXLA(input.get().getType())) return true; } for (auto output_types : op->getResultTypes()) { if (TypeMustBeNonXLA(output_types)) return true; } return false; } // Check if the op must be compiled with XLA. If the op does not satisfy this
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 06:51:01 UTC 2024 - 21.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_device.cc
void ParallelExecuteOp::build(OpBuilder& builder, OperationState& state, int num_regions, TypeRange output_types) { DCHECK_GE(num_regions, 1); for (int i = 0; i < num_regions; ++i) { Region* region = state.addRegion(); region->push_back(new Block); } state.addTypes(output_types); } Block& ParallelExecuteOp::GetRegionBlockWithIndex(unsigned index) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 33.4K bytes - Viewed (0)