- Sort Score
- Results per page 10
- Languages All
Results 11 - 20 of 139 for func_10 (0.32 sec)
-
tensorflow/compiler/mlir/lite/experimental/tac/tests/compute-cost.mlir
// CHECK: tac.cost = 7.864320e+05 func.func @func_0_CPU(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<256x32x32x3xf32>) -> tensor<256x32x32x3xf32> attributes {tac.device = "CPU", tac.interface_name = "func_0"} { %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU", tac.device = "CPU"} : (tensor<256x32x32x3xf32>, tensor<256x32x32x3xf32>) -> tensor<256x32x32x3xf32> func.return %0 : tensor<256x32x32x3xf32> }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 24 05:29:10 UTC 2022 - 4.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/common/subgraph.h
// Take the following example: // // call @func_1_CPU {tac.interface_name = "func_1"} // // "func_1" is the interface name where "func_1_cpu" is the real implementation // we can have multiple FuncOps like "func_1_cpu" and "func_1_gpu" and they // both implement "func_1". // // The attribute on the FuncOp means what it actually implements while the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Feb 28 05:18:47 UTC 2023 - 1.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc
ASSERT_TRUE(module_op); auto func_op = module_op->lookupSymbol<func::FuncOp>("fully_quantized_add"); ASSERT_THAT(func_op, NotNull()); auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>(); ASSERT_THAT(add_op_itr, Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>())); EXPECT_TRUE(IsOpFullyQuantized(*add_op_itr)); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/merge_duplicate_resource_ops.cc
// exist. // TODO(b/284222084): Move executor dialect utilities to a new library. GraphOp GetGraphOpFromFuncOp(func::FuncOp func_op) { if (func_op->getNumRegions() == 0 || func_op.getBody().empty()) return {}; auto graph_op_range = func_op.front().without_terminator(); if (llvm::hasSingleElement(graph_op_range)) { // The pass runs on a valid tf_executor dialect, so the op should be the // GraphOp.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 26 04:26:16 UTC 2023 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/cluster_tf_ops_pass.cc
func::FuncOp func_op) { llvm::StringMap<FunctionMetadata> metadatas; WalkResult result = func_op.getBody().walk([&](Operation *op) { std::string op_host = GetHost(op); FunctionMetadata &func_metadata = metadatas[op_host]; func_metadata.original_name = func_op.getName(); func_metadata.insertion_point = ++Block::iterator(func_op); func_metadata.ops.push_back(op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfrt/analysis/tensor_array_side_effect_analysis.cc
static bool FunctionContainsOnlyNoSideEffectOpOrTensorArrayOp( mlir::func::FuncOp func_op) { for (mlir::Operation& op : func_op.front()) { if (!mlir::isMemoryEffectFree(&op) && !IsTensorArrayOp(&op)) return false; } return true; } TensorArraySideEffectAnalysis::TensorArraySideEffectAnalysis( mlir::ModuleOp module) { for (auto func_op : module.getOps<mlir::func::FuncOp>()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Nov 16 01:49:07 UTC 2022 - 1.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_tf_xla_call_module_to_stablehlo_pass.cc
} return failure(); } }; namespace { bool IsShloMainFuncOp(func::FuncOp func_op) { if (func_op == nullptr) { return false; } if (!func_op.getSymName().contains(kStablehloModuleDefaultEntryFuncName)) { return false; } if (func_op.getSymVisibility() == "nested" || func_op.getSymVisibility() == "private") { return false; } return true; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jan 25 09:43:18 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc
SymbolTable symbol_table(call_op->getParentOfType<ModuleOp>()); auto func_op = dyn_cast_or_null<func::FuncOp>(symbol_table.lookup(func_name)); if (!func_op) return failure(); // The quantized fusion should have requantize and return ops at the end. auto return_op = dyn_cast_or_null<func::ReturnOp>( func_op.getRegion().getBlocks().front().getTerminator()); if (!return_op) return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_main_function.cc
llvm::SmallVector<Type> arg_types, result_types; for (auto func_op : module_op.getOps<func::FuncOp>()) { if (!ShouldIncludeInMainFunction(func_op)) continue; arg_types.append(func_op.getArgumentTypes().begin(), func_op.getArgumentTypes().end()); auto& return_op = func_op.getBody().getBlocks().front().back(); result_types.append(return_op.getOperandTypes().begin(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 16.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/merge_save_function_ops_to_main.cc
}; // Returns true iff func_op has either no Region or the body has no Blocks. bool IsFuncOpEmpty(func::FuncOp func_op) { return func_op->getNumRegions() == 0 || func_op.getBody().empty(); } // Gets the GraphOp from the function op. Returns an empty op iff it doesn't // exist. GraphOp GetGraphOpFromFuncOp(func::FuncOp func_op) { if (IsFuncOpEmpty(func_op)) return {};
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.7K bytes - Viewed (0)