- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 46 for PartitionedCallOp (0.2 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc
class PropagateDequantizeOpIfAllowed : public OpRewritePattern<TF::PartitionedCallOp> { public: explicit PropagateDequantizeOpIfAllowed(MLIRContext* context) : OpRewritePattern<TF::PartitionedCallOp>(context) {} // Create a new dequantize op that is propagated. void createNewDequantizeOp(PatternRewriter& rewriter, TF::PartitionedCallOp original_dequantize_op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc
return quantization_func; } // Post-actions after adding quantization logics. Post-actions include // 1) Adding the created function in the symbol table // 2) Creating a PartitionedCallOp in the main graph that calls the created // function. TF::PartitionedCallOp FinalizeFunctionRegister( PatternRewriter& rewriter, Value input, Value output, func::FuncOp& quantization_func, Operation* quantized_op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc
} Operation *DuplicateOp(TF::PartitionedCallOp call_op, PatternRewriter &rewriter, const StringAttr &new_ref_func_name) { // Create PartitionedCallOp to the copied composite function. This // PartitionedCallOp does not have kQuantTraitAttrName, and therefore won't // get quantized. auto new_call_op = rewriter.create<TF::PartitionedCallOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 13K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
xla_call_module_op != nullptr) { absl::StatusOr<Method> method = GetQuantizationMethod(xla_call_module_op); if (method.ok() && method->has_static_range_ptq()) return true; } TF::PartitionedCallOp call_op = dyn_cast_or_null<TF::PartitionedCallOp>(op); return call_op && call_op->hasAttrOfType<StringAttr>(kQuantTraitAttrName) && call_op->getAttrOfType<StringAttr>(kQuantTraitAttrName).getValue() == llvm::StringRef(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
class PreprocessConstantOp : public OpRewritePattern<TF::PartitionedCallOp> { public: explicit PreprocessConstantOp(MLIRContext* context, OpSet op_set, QuantMethod quantization_method, bool enable_per_channel_quantization) : OpRewritePattern<TF::PartitionedCallOp>(context), op_set_(op_set), quantization_method_(quantization_method),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
// into [H, W, In, Mul] class RestoreWeightShapePattern : public OpRewritePattern<TF::PartitionedCallOp> { using OpRewritePattern<TF::PartitionedCallOp>::OpRewritePattern; private: LogicalResult addReshapeOpToDepthwiseWeight(TF::PartitionedCallOp op, PatternRewriter& rewriter) const { int weight_operand_idx = 1;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
class CheckQuantizableOps : public mlir::OpRewritePattern<TF::PartitionedCallOp> { public: explicit CheckQuantizableOps(MLIRContext* context, const QuantizationOptions& quant_options) : OpRewritePattern<TF::PartitionedCallOp>(context), quant_options_(quant_options) {} private: LogicalResult matchAndRewrite(TF::PartitionedCallOp call_op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc
if (!preceding_op) continue; // Check whether the preceding op is a quantized composite function. if (llvm::isa<TF::PartitionedCallOp>(preceding_op)) { auto call_op = llvm::cast<TF::PartitionedCallOp>(preceding_op); if (!IsCompositeFunction(call_op)) continue; return true; } // Check if the preceding op is a quantized same-scale op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 23.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/convert_launch_func_to_tf_call.cc
namespace TFDevice { namespace { #define GEN_PASS_DEF_CONVERTLAUNCHFUNCTOTFCALLPASS #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc" // Rewrites tf_device::LaunchFuncOp into TF::PartitionedCallOp. struct ConvertLaunchFuncToTFCallPass : public impl::ConvertLaunchFuncToTFCallPassBase< ConvertLaunchFuncToTFCallPass> { void runOnOperation() override; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Aug 31 21:08:09 UTC 2023 - 2.8K bytes - Viewed (0) -
tensorflow/c/experimental/saved_model/core/revived_types/flat_tensor_function.cc
// In graph mode, we create a PartitionedCallOp instead: // https://github.com/tensorflow/tensorflow/blob/66668ec0ca432e2f38a575b814f45b6d299d01ed/tensorflow/python/eager/function.py#L573 // TODO(bmzhao): After discussing with Allen, we should execute this via a // PartitionedCallOp for compatibility with "tooling that assumes functions in // graphs are PartitionedCallOps".
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 14 19:16:58 UTC 2023 - 3.7K bytes - Viewed (0)