- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 14 for PartitionedCallOp (0.23 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc
class PropagateDequantizeOpIfAllowed : public OpRewritePattern<TF::PartitionedCallOp> { public: explicit PropagateDequantizeOpIfAllowed(MLIRContext* context) : OpRewritePattern<TF::PartitionedCallOp>(context) {} // Create a new dequantize op that is propagated. void createNewDequantizeOp(PatternRewriter& rewriter, TF::PartitionedCallOp original_dequantize_op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/convert_launch_func_to_tf_call.cc
namespace TFDevice { namespace { #define GEN_PASS_DEF_CONVERTLAUNCHFUNCTOTFCALLPASS #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc" // Rewrites tf_device::LaunchFuncOp into TF::PartitionedCallOp. struct ConvertLaunchFuncToTFCallPass : public impl::ConvertLaunchFuncToTFCallPassBase< ConvertLaunchFuncToTFCallPass> { void runOnOperation() override; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Aug 31 21:08:09 UTC 2023 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc
class CheckQuantizableOps : public mlir::OpRewritePattern<TF::PartitionedCallOp> { public: explicit CheckQuantizableOps(MLIRContext* context, const QuantMethod quantization_method, const OpSet target_opset, const int min_num_elements_for_weights) : OpRewritePattern<TF::PartitionedCallOp>(context), quantization_method_(quantization_method),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" namespace mlir { namespace quant { std::optional<TF::PartitionedCallOp> ApplyUniformQuantization( PatternRewriter& rewriter, TF::ConstOp op, tensorflow::quantization::QuantizationComponentSpec& weight_spec); } // namespace quant } // namespace mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Mar 24 07:44:40 UTC 2024 - 1.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op_test.cc
quant_spec.set_quantization_component( QuantizationComponentSpec::COMPONENT_WEIGHT); quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8); std::optional<TF::PartitionedCallOp> dequantize_op = ApplyUniformQuantization( pattern_rewriter, cast<TF::ConstOp>(value.getDefiningOp()), quant_spec); EXPECT_TRUE(dequantize_op.has_value()); EXPECT_EQ(dequantize_op.value().func().getName().str(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_inline_tpu_island.cc
Operation *nested_module = symbol_table.lookup(kNestedModule); if (!nested_module) return; InlinerInterface inliner(&getContext()); auto walk_result = getOperation().walk([&](TF::PartitionedCallOp call_op) { if (!call_op.getF().getRootReference().getValue().starts_with( kNestedModule)) return WalkResult::advance(); // This is a call we need to inline! LLVM_DEBUG(llvm::dbgs()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 19 08:06:04 UTC 2023 - 4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.td
==============================================================================*/ include "mlir/IR/OpBase.td" include "mlir/IR/PatternBase.td" include "mlir/Dialect/Func/IR/FuncOps.td" // Creates a function call with TF::PartitionedCallOp and a new function to // wrap the section between arguments and results. // // The `returns` number indicates the number of results the function returns. class LiftAsTFPartitionedCall<string func_name, int returns = 1> :
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 25 00:32:20 UTC 2024 - 3.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tpu_model_to_cpu.cc
// its original arguments except for the last element. SmallVector<Value> args = call_op.getOperands().drop_back(); rewriter.replaceOpWithNewOp<TF::PartitionedCallOp>( call_op, float_func.getResultTypes(), args, f_attr); return success(); } }; void ConvertTpuModelToCpuPass::runOnOperation() { MLIRContext* ctx = &getContext();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h
// Creates a function to wrap the section between arguments and results. // The generated function call op type will be decided by the given call_op_type // argument. Currently, it supports TF::XlaCallModuleOp and // TF::PartitionedCallOp function call op generations. SmallVector<Value, 4> LiftAsFunctionCall(OpBuilder& builder, Location location, FunctionCallOpType call_op_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/xla_cluster_formation.cc
outermost_pcall_ops; if (mlir::failed( mlir::GetOpsOfTypeUntilMiss<mlir::TF::StatefulPartitionedCallOp, mlir::TF::PartitionedCallOp>( func, symtab, /*predicate*/ has_no_compile_device_type, /*hits*/ noinline_pcall_ops, /*first_misses*/ outermost_pcall_ops))) { return mlir::failure(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 19 19:09:44 UTC 2023 - 6K bytes - Viewed (0)