- Sort by: Score
- Results per page: 10
- Languages All
Results 11 - 20 of 57 for call_op (0.48 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
// Checks if an op calls a composite function and all the inputs and outputs are // quantized. bool IsQuantizedCompositeFunction(func::CallOp call_op) { if (!call_op.getCallee().starts_with("quantized_")) { return false; } bool has_quantized_types = false; for (Value operand : call_op.getOperands()) { if (const TensorType type = mlir::dyn_cast<TensorType>(operand.getType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h
// quantization. inline FlatSymbolRefAttr GetFuncAttr(TF::PartitionedCallOp call_op) { return mlir::dyn_cast<FlatSymbolRefAttr>(call_op.getFAttr()); } inline FlatSymbolRefAttr GetFuncAttr(TF::XlaCallModuleOp call_op) { return call_op->getAttrOfType<FlatSymbolRefAttr>( TF::kStablehloEntryFunctionAttrName); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc
func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); Operation* call_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn); EXPECT_FALSE(IsHybridQuantizedOp(call_op)); } constexpr absl::string_view kModuleDotGeneralFullyConnected = R"mlir( module {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 22.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.cc
if (!main_func) continue; SymbolTable symbol_table(module_op); for (auto call_op : main_func.getOps<TF::PartitionedCallOp>()) { func_ops.push_back(dyn_cast_or_null<func::FuncOp>(symbol_table.lookup( mlir::cast<FlatSymbolRefAttr>(call_op.getFAttr()).getValue()))); } for (auto call_op : main_func.getOps<TF::StatefulPartitionedCallOp>()) { func_ops.push_back(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc
Operation* next_op = cur_op_use.getOwner(); int next_op_operand_num = cur_op_use.getOperandNumber(); if (auto call_op = llvm::dyn_cast<mlir::CallOpInterface>(next_op)) { mlir::func::FuncOp func = llvm::dyn_cast<mlir::func::FuncOp>(call_op.resolveCallable()); if (!func) continue; next_values_to_visit.push_back(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 11.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_main_function.cc
result_idx += func_op.getNumResults(); auto call_op = builder.create<TF::PartitionedCallOp>( module_op.getLoc(), new_types, new_args, SymbolRefAttr::get(context, func_op.getSymName()), /*config=*/builder.getStringAttr(""), /*config_proto=*/builder.getStringAttr(""), /*executor_type=*/builder.getStringAttr("")); call_op_returns.append(call_op.getResults().begin(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 16.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc
return failure(); } } return success(); } LogicalResult CheckOutputConsumer( Operation* call_op, int expected_num_outputs, llvm::DenseSet<int> expected_consumer_indices) { const int num_results = call_op->getNumResults(); if (num_results != expected_num_outputs) return failure(); for (int i = 0; i < expected_num_outputs; ++i) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter_test.cc
} auto a = Parameter(&builder, 0, ShapeUtil::MakeScalarShape(xla::F32), "a"); auto b = Parameter(&builder, 1, ShapeUtil::MakeScalarShape(xla::F32), "b"); XlaOp call_op = xla::Call(&builder, to_apply, {a, b}); std::vector<XlaOp> tuple_values; tuple_values.push_back(call_op); xla::Tuple(&builder, tuple_values); TF_ASSERT_OK_AND_ASSIGN(XlaComputation computation, builder.Build());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:16:07 UTC 2024 - 11.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_sharding_identification_pass.cc
next_values_to_visit.push_back(use.getOwner()->getResult(0)); continue; } if (auto call_op = llvm::dyn_cast<mlir::CallOpInterface>(owner)) { mlir::func::FuncOp func = llvm::dyn_cast<mlir::func::FuncOp>(call_op.resolveCallable()); if (!func) continue; next_values_to_visit.push_back( func.getArgument(use.getOperandNumber()));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 28.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/tests/ops.mlir
func.func private @tensor_invalid_1() -> !tfr.tensor<[N, T> // ----- // expected-error@+1 {{unbalanced}} func.func @tensor_invalid_2() -> !tfr.tensor<[N, T] // ----- // CHECK-LABEL: call_op func.func @call_op(%arg0: !tfr.tensor<T>, %arg1: !tfr.tensor_list<TL>, %arg2: i32) -> !tfr.tensor<K> { %0 = tfr.call @Foo(%arg0, %arg1, %arg2) : (!tfr.tensor<T>, !tfr.tensor_list<TL>, i32) -> !tfr.tensor<K> func.return %0 : !tfr.tensor<K>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jan 14 22:15:06 UTC 2023 - 13.1K bytes - Viewed (0)