- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 32 for PartitionedCallOp (0.4 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc
class PropagateDequantizeOpIfAllowed : public OpRewritePattern<TF::PartitionedCallOp> { public: explicit PropagateDequantizeOpIfAllowed(MLIRContext* context) : OpRewritePattern<TF::PartitionedCallOp>(context) {} // Create a new dequantize op that is propagated. void createNewDequantizeOp(PatternRewriter& rewriter, TF::PartitionedCallOp original_dequantize_op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc
return quantization_func; } // Post-actions after adding quantization logics. Post-actions include // 1) Adding the created function in the symbol table // 2) Creating a PartitionedCallOp in the main graph that calls the created // function. TF::PartitionedCallOp FinalizeFunctionRegister( PatternRewriter& rewriter, Value input, Value output, func::FuncOp& quantization_func, Operation* quantized_op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
xla_call_module_op != nullptr) { absl::StatusOr<Method> method = GetQuantizationMethod(xla_call_module_op); if (method.ok() && method->has_static_range_ptq()) return true; } TF::PartitionedCallOp call_op = dyn_cast_or_null<TF::PartitionedCallOp>(op); return call_op && call_op->hasAttrOfType<StringAttr>(kQuantTraitAttrName) && call_op->getAttrOfType<StringAttr>(kQuantTraitAttrName).getValue() == llvm::StringRef(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
class PreprocessConstantOp : public OpRewritePattern<TF::PartitionedCallOp> { public: explicit PreprocessConstantOp(MLIRContext* context, OpSet op_set, QuantMethod quantization_method, bool enable_per_channel_quantization) : OpRewritePattern<TF::PartitionedCallOp>(context), op_set_(op_set), quantization_method_(quantization_method),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
// into [H, W, In, Mul] class RestoreWeightShapePattern : public OpRewritePattern<TF::PartitionedCallOp> { using OpRewritePattern<TF::PartitionedCallOp>::OpRewritePattern; private: LogicalResult addReshapeOpToDepthwiseWeight(TF::PartitionedCallOp op, PatternRewriter& rewriter) const { int weight_operand_idx = 1;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
class CheckQuantizableOps : public mlir::OpRewritePattern<TF::PartitionedCallOp> { public: explicit CheckQuantizableOps(MLIRContext* context, const QuantizationOptions& quant_options) : OpRewritePattern<TF::PartitionedCallOp>(context), quant_options_(quant_options) {} private: LogicalResult matchAndRewrite(TF::PartitionedCallOp call_op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc
class CheckQuantizableOps : public mlir::OpRewritePattern<TF::PartitionedCallOp> { public: explicit CheckQuantizableOps(MLIRContext* context, const QuantMethod quantization_method, const OpSet target_opset, const int min_num_elements_for_weights) : OpRewritePattern<TF::PartitionedCallOp>(context), quantization_method_(quantization_method),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc
const StringRef func_name, const TypeRange output_types, const ValueRange args) { TF::PartitionedCallOp call_op = builder.create<TF::PartitionedCallOp>( location, output_types, args, FlatSymbolRefAttr::get(builder.getStringAttr(func_name)), /*config=*/"", /*config_proto=*/"", /*executor_type=*/"");
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 21.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tpu_model_to_cpu.cc
// its original arguments except for the last element. SmallVector<Value> args = call_op.getOperands().drop_back(); rewriter.replaceOpWithNewOp<TF::PartitionedCallOp>( call_op, float_func.getResultTypes(), args, f_attr); return success(); } }; void ConvertTpuModelToCpuPass::runOnOperation() { MLIRContext* ctx = &getContext();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h
// Creates a function to wrap the section between arguments and results. // The generated function call op type will be decided by the given call_op_type // argument. Currently, it supports TF::XlaCallModuleOp and // TF::PartitionedCallOp function call op generations. SmallVector<Value, 4> LiftAsFunctionCall(OpBuilder& builder, Location location, FunctionCallOpType call_op_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 5.5K bytes - Viewed (0)