- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of 67 for call_op (0.12 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/tensor_list_ops_decomposition.cc
} for (auto pair : llvm::zip(new_op.getRegions(), case_op.getRegions())) { std::get<0>(pair)->takeBody(*std::get<1>(pair)); } case_op.replaceAllUsesWith( new_op.getResults().take_front(case_op.getNumResults())); case_op.erase(); return success(); } template <typename CallOp> LogicalResult HandlePartitionedCallOp( CallOp call, func::FuncOp callee, ModuleOp module,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 39.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_quantization_unit_loc.cc
StringRef op_type_with_suffix = mlir::cast<NameLoc>(locations.front()).getName().strref(); if (!op_type_with_suffix.ends_with(":")) return false; return absl::c_all_of(locations, [](Location loc) { return isa<NameLoc>(loc) || (isa<CallSiteLoc>(loc) && isa<NameLoc>(mlir::cast<CallSiteLoc>(loc).getCallee())); }); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/remove_unused_arguments.cc
llvm::DenseMap<Operation*, llvm::DenseMap<int, int>> args_to_remap; llvm::DenseSet<Operation*> do_not_touch; // Funcs referenced by non-call ops // Find all users of functions that are not through a CallOp. Those // are functions we need to leave alone. module->walk([&](SymbolUserOpInterface op) { if (llvm::isa<CallOpInterface>(op.getOperation())) return;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/hoist_loop_invariant.cc
}); // Get read only variables by checking if their users only have read effect. llvm::DenseSet<ResourceHandle> read_only_vars; for (const auto &[resource_handle, var_handle_ops] : resources) { if (std::all_of(var_handle_ops.begin(), var_handle_ops.end(), [](Operation *op) { for (auto *user : op->getUsers()) { if (!OnlyHasReadEffect(user)) return false;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc
TF::UniformQuantizedClipByValueOp>(); target.addDynamicallyLegalOp<TF::CastOp>([](Operation *op) { auto cast_op = llvm::dyn_cast<TF::CastOp>(op); return !IsTFQintType(cast_op.getSrcT()) && !IsTFQintType(cast_op.getDstT()); }); RewritePatternSet patterns(ctx); PopulateLegalizeTfQuantizationPatterns(ctx, &patterns);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 30.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc
int padding_after = padding_before + offset; padding_values[2 * i] = padding_before; padding_values[2 * i + 1] = padding_after; } } if (input_zp_value == 0 || absl::c_all_of(padding_values, [](int v) { return v == 0; })) { padding = CreateConstValue<int32_t>( builder, loc, {num_dims - 2, 2}, SmallVector<int32_t>(padding_values.begin() + 2,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
return uniform_type.getZeroPoint() == 0; } else if (auto per_axis_type = llvm::dyn_cast_or_null<UniformQuantizedPerAxisType>(qtype)) { return absl::c_all_of(per_axis_type.getZeroPoints(), [](int64_t x) { return x == 0; }); } return false; } // Multiplies two 1D arrays with broadcasting support. template <typename T>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc
EXPECT_TRUE(result.method().has_static_range_ptq()); } TEST_F(QuantizationReportTest, InitializeWithModuleOpWithoutQuantizationMethodAttribute) { // A quantized dot_general op but the `CallOp` is missing the // `_quantization_method` attribute. constexpr absl::string_view kQuantizedDotGeneralMissingQuantizationMethodAttr = R"mlir( func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 10:10:34 UTC 2024 - 18.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
// CHECK: %[[CALL_0:.+]] = call @quantized_dot_general_fn(%[[UNIFORM_QUANTIZE_0]], %[[CONST_0]]) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, {{.*}}>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {{.*}}>>) -> tensor<1x3x!quant.uniform<i8:f32, {{.*}}>> // CHECK: %[[UNIFORM_DEQUANTIZE_0:.+]] = stablehlo.uniform_dequantize %[[CALL_0]] : (tensor<1x3x!quant.uniform<i8:f32, {{.*}}>>) -> tensor<1x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc
auto transpose_op = *transpose_ops.begin(); auto result_type = mlir::dyn_cast_or_null<ShapedType>(transpose_op.getResult().getType()); auto is_valid_move = llvm::all_of(op->getOperands(), [result_type](Value operand) -> bool { auto operand_type = mlir::dyn_cast_or_null<ShapedType>(operand.getType()); return result_type && operand_type && result_type.hasRank() &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.3K bytes - Viewed (0)