- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 10 for call_op (0.13 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
bool IsQuantizedCallforDynamicRange(TF::PartitionedCallOp call_op) { bool has_quantized_types_for_weights = false; std::unique_ptr<OpQuantSpec> spec = GetTFOpQuantSpec(call_op); for (int32_t cur_idx = 0; cur_idx < call_op.getArgs().size(); cur_idx++) { // Check if only the weight index has QuantizeCastOp. auto cur_op = dyn_cast_or_null<quantfork::QuantizeCastOp>( call_op.getArgs()[cur_idx].getDefiningOp());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc
return GetInverseScalesConstantOp().getValue(); } func::CallOp GetCallOp() { return call_op_; } FlatSymbolRefAttr GetFunction() { return call_op_.getCalleeAttr(); } private: explicit UniformQuantizeFunctionCallPattern(func::CallOp call_op) : call_op_(call_op) {} func::CallOp call_op_; }; // Matches the following pattern that represents uniform dequantization. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 64.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc
} if (failed(HandleCaseOrIfOp(case_op, branch_functions))) return failure(); } else if (auto call_op = llvm::dyn_cast<TF::PartitionedCallOp>(&op)) { auto callee = call_op.func(); if (!callee) { return call_op.emitOpError( "resource lifting does not support call with nested references."); } if (failed(HandlePartitionedCallOp(call_op, callee, module,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 55.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 92.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_import.cc
op_builder.setInsertionPointToStart(&region.front()); auto call_op = op_builder.create<mlir::func::CallOp>( loc, func.getFunctionType().getResults(), func.getSymName(), region.getArguments()); op_builder.create<mlir::TFL::YieldOp>(loc, call_op.getResults()); } void InlineStablehloOpRegion(mlir::Region& region, mlir::func::FuncOp func) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 66.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
// CHECK: %[[CALL_0:.+]] = call @quantized_dot_general_fn(%[[UNIFORM_QUANTIZE_0]], %[[CONST_0]]) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, {{.*}}>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {{.*}}>) -> tensor<1x3x!quant.uniform<i8:f32, {{.*}}>> // CHECK: %[[UNIFORM_DEQUANTIZE_0:.+]] = stablehlo.uniform_dequantize %[[CALL_0]] : (tensor<1x3x!quant.uniform<i8:f32, {{.*}}>) -> tensor<1x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass.cc
auto allowlist = GetOrCreateAllowlist(); std::vector<string> vall_ops = XlaOpRegistry::GetAllRegisteredOps(); absl::flat_hash_set<string> all_ops(vall_ops.begin(), vall_ops.end()); // Check that user's provided TF operation really exists. for (const auto& s : allowlist) { if (!all_ops.contains(s)) { return errors::InvalidArgument( "The operation '", s,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tensor_array_ops_decomposition.mlir
// CHECK: "tf.IfRegion"(%[[PRED]]) <{is_stateless = false}> ({ %case_op = "tf.IfRegion"(%arg0) <{is_stateless = false}> ({ // CHECK: %[[TA_VAL:.*]] = "tf.ReadVariableOp"(%[[TA_BUFFER]]) // CHECK: "tf.Slice"(%[[TA_VAL]] // CHECK-NOT: tf.TensorArrayReadV3
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 49K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
TEST(XlaCompilationTest, XLALiteAllowlist) { auto* allowlist_table = tensorflow::GetAllowlistTable(); absl::flat_hash_set<string> hallowlist; std::vector<string> vall_ops = XlaOpRegistry::GetAllRegisteredOps(); absl::flat_hash_set<string> all_ops(vall_ops.begin(), vall_ops.end()); // Check that all the operations in the table are existing TF operations for (auto pair : *allowlist_table) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 10:11:10 UTC 2024 - 79.6K bytes - Viewed (0) -
tensorflow/compiler/jit/deadness_analysis.cc
// ~(A & B & C) & (A & B & C) & ... == False // // (~A & ~B & ~C) | A | B | C | ... == // ~(A | B | C) | (A | B | C) | ... == True if (absl::c_all_of(negated_op->GetOperands(), [&](Predicate* p) { return simplified_ops_set.contains(p); })) { return is_and ? MakeFalse() : MakeTrue(); } } negated_ops.insert(negated_op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 60.4K bytes - Viewed (0)