- Sort Score
- Results: 10
- Languages All
Results 1 - 10 of 10 for PartitionedCallOp (0.21 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc
class PropagateDequantizeOpIfAllowed : public OpRewritePattern<TF::PartitionedCallOp> { public: explicit PropagateDequantizeOpIfAllowed(MLIRContext* context) : OpRewritePattern<TF::PartitionedCallOp>(context) {} // Create a new dequantize op that is propagated. void createNewDequantizeOp(PatternRewriter& rewriter, TF::PartitionedCallOp original_dequantize_op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc
class CheckQuantizableOps : public mlir::OpRewritePattern<TF::PartitionedCallOp> { public: explicit CheckQuantizableOps(MLIRContext* context, const QuantMethod quantization_method, const OpSet target_opset, const int min_num_elements_for_weights) : OpRewritePattern<TF::PartitionedCallOp>(context), quantization_method_(quantization_method),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" namespace mlir { namespace quant { std::optional<TF::PartitionedCallOp> ApplyUniformQuantization( PatternRewriter& rewriter, TF::ConstOp op, tensorflow::quantization::QuantizationComponentSpec& weight_spec); } // namespace quant } // namespace mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Mar 24 07:44:40 UTC 2024 - 1.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.td
==============================================================================*/ include "mlir/IR/OpBase.td" include "mlir/IR/PatternBase.td" include "mlir/Dialect/Func/IR/FuncOps.td" // Creates a function call with TF::PartitionedCallOp and a new function to // wrap the section between arguments and results. // // The `returns` number indicates the number of results the function returns. class LiftAsTFPartitionedCall<string func_name, int returns = 1> :
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 25 00:32:20 UTC 2024 - 3.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tpu_model_to_cpu.cc
// its original arguments except for the last element. SmallVector<Value> args = call_op.getOperands().drop_back(); rewriter.replaceOpWithNewOp<TF::PartitionedCallOp>( call_op, float_func.getResultTypes(), args, f_attr); return success(); } }; void ConvertTpuModelToCpuPass::runOnOperation() { MLIRContext* ctx = &getContext();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h
// Creates a function to wrap the section between arguments and results. // The generated function call op type will be decided by the given call_op_type // argument. Currently, it supports TF::XlaCallModuleOp and // TF::PartitionedCallOp function call op generations. SmallVector<Value, 4> LiftAsFunctionCall(OpBuilder& builder, Location location, FunctionCallOpType call_op_type,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc
// than function name. std::unique_ptr<OpQuantSpec> GetTFOpQuantSpec(Operation* op) { auto spec = std::make_unique<OpQuantSpec>(); if (auto call_op = dyn_cast<TF::PartitionedCallOp>(op)) { StringRef function_name = mlir::cast<FlatSymbolRefAttr>(call_op.getFAttr()).getValue(); if (!function_name.starts_with("composite_")) { return spec; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/tf_stablehlo_pass.cc
} if (skip_resize_) { target.addLegalOp<TF::ResizeBilinearOp>(); target.addLegalOp<TF::ResizeNearestNeighborOp>(); } if (skip_partitioned_calls_) { target.addLegalOp<TF::PartitionedCallOp>(); target.addLegalOp<TF::StatefulPartitionedCallOp>(); } FrozenRewritePatternSet frozen_patterns(std::move(patterns)); if (failed(applyPartialConversion(func, target, frozen_patterns))) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 7.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_hashtable_ops_as_args.cc
auto function_uses = SymbolTable::getSymbolUses(func, &module.getBodyRegion()); if (!function_uses.has_value()) return false; for (auto& function_use : function_uses.value()) { if (!llvm::isa<TF::PartitionedCallOp, TF::StatefulPartitionedCallOp>( function_use.getUser())) { return false; } } return true; } // Returns the `shared_name` attribute value if exists. If not, returns an
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 8.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_outline_tpu_island.cc
// The function is in place in the nested module, create a call and yield in // the original island. OpBuilder builder = OpBuilder::atBlockEnd(&island_op.GetBody()); auto call_op = builder.create<mlir::TF::PartitionedCallOp>( island_op.getLoc(), func_result_types, operands.getArrayRef(), SymbolRefAttr::get( builder.getContext(), kNestedModule,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.7K bytes - Viewed (0)