- Sort: Score
- Results per page: 10
- Languages All
Results 41 - 50 of 107 for callFoo (0.11 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc
} else if (auto call_op = llvm::dyn_cast<TF::PartitionedCallOp>(&op)) { auto callee = call_op.func(); if (!callee) { return call_op.emitOpError( "resource lifting does not support call with nested references."); } if (failed(HandlePartitionedCallOp(call_op, callee, module, vars_initialized,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 55.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc
func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); Operation* call_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn); EXPECT_FALSE(IsHybridQuantizedOp(call_op)); } constexpr absl::string_view kModuleDotGeneralFullyConnected = R"mlir( module {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 22.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_identity_pruning.cc
[&reachable_funcs](Region& src, SmallVectorImpl<func::FuncOp>& funcs_to_visit) { src.walk([&reachable_funcs, &funcs_to_visit](CallOpInterface call_op) { auto func = dyn_cast_or_null<func::FuncOp>(call_op.resolveCallable()); if (func && reachable_funcs.insert(func).second) funcs_to_visit.push_back(func); }); };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 23:50:19 UTC 2022 - 4.1K bytes - Viewed (0) -
platforms/documentation/docs/src/snippets/native-binaries/cunit/groovy/libs/cunit/2.1-2/include/CUnit/MyMem.h
/** Generate report on tracked memory (old macro). */ #define CU_DUMP_MEMORY_USAGE(x) CU_dump_memory_usage((x)) #else /* MEMTRACE */ /** Standard calloc() if MEMTRACE not defined. */ #define CU_CALLOC(x, y) calloc((x), (y)) /** Standard malloc() if MEMTRACE not defined. */ #define CU_MALLOC(x) malloc((x)) /** Standard free() if MEMTRACE not defined. */
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon Nov 27 17:53:42 UTC 2023 - 4K bytes - Viewed (0) -
src/internal/coverage/calloc/batchcounteralloc.go
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package calloc // This package contains a simple "batch" allocator for allocating // coverage counters (slices of uint32 basically), for working with // coverage data files. Collections of counter arrays tend to all be // live/dead over the same time period, so a good fit for batch // allocation.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Sep 28 11:47:16 UTC 2022 - 754 bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/function-order.mlir
// CHECK: name: "foo" // CHECK: } // CHECK: } %0 = tf_executor.island wraps "tf.PartitionedCall"() {Tin = [], Tout = [], config = "", config_proto = "", device = "", executor_type = "", f = @foo, name = "Call_foo"} : () -> () tf_executor.fetch } func.return } // CHECK: library { // CHECK: function { // CHECK-NEXT: signature { // CHECK-NEXT: name: "bar" // CHECK-NEXT: }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 17 18:52:47 UTC 2023 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/tf-gradient-attr.mlir
// CHECK: name: "foo" // CHECK: } %1:2 = tf_executor.island wraps "tf.PartitionedCall"(%0) {Tin = [], Tout = [], config = "", config_proto = "", device = "", executor_type = "", f = @foo, name = "Call_foo"} : (tensor<f32>) -> tensor<*xf32> tf_executor.fetch } func.return } // CHECK: library { // CHECK-NEXT: function { // CHECK-NEXT: signature { // CHECK-NEXT: name: "foo"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 17 18:52:47 UTC 2023 - 1.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc
// than function name. std::unique_ptr<OpQuantSpec> GetTFOpQuantSpec(Operation* op) { auto spec = std::make_unique<OpQuantSpec>(); if (auto call_op = dyn_cast<TF::PartitionedCallOp>(op)) { StringRef function_name = mlir::cast<FlatSymbolRefAttr>(call_op.getFAttr()).getValue(); if (!function_name.starts_with("composite_")) { return spec; } if (function_name.contains("depthwise_conv2d")) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/region_control_flow_to_functional.cc
TF::YieldOp yield = llvm::dyn_cast_or_null<TF::YieldOp>(block.getTerminator()); if (!yield) return std::nullopt; func::CallOp call = llvm::dyn_cast_or_null<func::CallOp>(*block.begin()); if (!call) return std::nullopt; if (block.getNumArguments() != call.getNumOperands() || call.getNumResults() != yield.getNumOperands()) return std::nullopt;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.cc
if (!main_func) continue; SymbolTable symbol_table(module_op); for (auto call_op : main_func.getOps<TF::PartitionedCallOp>()) { func_ops.push_back(dyn_cast_or_null<func::FuncOp>(symbol_table.lookup( mlir::cast<FlatSymbolRefAttr>(call_op.getFAttr()).getValue()))); } for (auto call_op : main_func.getOps<TF::StatefulPartitionedCallOp>()) { func_ops.push_back(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21K bytes - Viewed (0)