Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 17 for PartitionedCallOp (0.21 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc

    class PropagateDequantizeOpIfAllowed
        : public OpRewritePattern<TF::PartitionedCallOp> {
     public:
      explicit PropagateDequantizeOpIfAllowed(MLIRContext* context)
          : OpRewritePattern<TF::PartitionedCallOp>(context) {}
    
      // Create a new dequantize op that is propagated.
      void createNewDequantizeOp(PatternRewriter& rewriter,
                                 TF::PartitionedCallOp original_dequantize_op,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/transforms/convert_launch_func_to_tf_call.cc

    namespace TFDevice {
    
    namespace {
    
    #define GEN_PASS_DEF_CONVERTLAUNCHFUNCTOTFCALLPASS
    #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc"
    
    // Rewrites tf_device::LaunchFuncOp into TF::PartitionedCallOp.
    struct ConvertLaunchFuncToTFCallPass
        : public impl::ConvertLaunchFuncToTFCallPassBase<
              ConvertLaunchFuncToTFCallPass> {
      void runOnOperation() override;
    };
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Aug 31 21:08:09 UTC 2023
    - 2.8K bytes
    - Viewed (0)
  3. tensorflow/c/experimental/saved_model/core/revived_types/flat_tensor_function.cc

      // In graph mode, we create a PartitionedCallOp instead:
      // https://github.com/tensorflow/tensorflow/blob/66668ec0ca432e2f38a575b814f45b6d299d01ed/tensorflow/python/eager/function.py#L573
    
      // TODO(bmzhao): After discussing with Allen, we should execute this via a
      // PartitionedCallOp for compatibility with "tooling that assumes functions in
      // graphs are PartitionedCallOps".
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Apr 14 19:16:58 UTC 2023
    - 3.7K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc

    class CheckQuantizableOps
        : public mlir::OpRewritePattern<TF::PartitionedCallOp> {
     public:
      explicit CheckQuantizableOps(MLIRContext* context,
                                   const QuantMethod quantization_method,
                                   const OpSet target_opset,
                                   const int min_num_elements_for_weights)
          : OpRewritePattern<TF::PartitionedCallOp>(context),
            quantization_method_(quantization_method),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/tests/executor_tpuv1_outline_island/executor_tpuv1_outline_tpu_island.mlir

    // skipped from outlining for both single-core and replicated case (i.e., the
    // `_tpu_v1_compat_outlined` module must be empty and no `PartitionedCallOp` is
    // created). Also check that `_skip_island_outlining` attribute is removed.
    
    // CHECK-LABEL: @func5
    // CHECK-NOT: _skip_island_outlining
    // CHECK-NOT: PartitionedCallOp
    // CHECK: _tpu_v1_compat_outlined {
    // CHECK-NEXT: }
    func.func @func5() attributes {_skip_island_outlining = true} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 04 03:54:58 UTC 2022
    - 4.9K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h

    #include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
    
    namespace mlir {
    namespace quant {
    
    std::optional<TF::PartitionedCallOp> ApplyUniformQuantization(
        PatternRewriter& rewriter, TF::ConstOp op,
        tensorflow::quantization::QuantizationComponentSpec& weight_spec);
    
    }  // namespace quant
    }  // namespace mlir
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Mar 24 07:44:40 UTC 2024
    - 1.9K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/tests/executor_tpuv1_outline_island/while_op.mlir

        %0 = "tf.some_op"(%arg0) : (tensor<i32>) -> tensor<i32>
        func.return %0 : tensor<i32>
      }
      func.func @while_cond_without_cluster_attr(%arg0: tensor<i32>) -> tensor<i1> {
        %0 = "tf.PartitionedCallOp"(%arg0) { f = @callee_func} : (tensor<i32>) -> tensor<i1>
        func.return %0 : tensor<i1>
      }
      func.func @callee_func(%arg0: tensor<i32>) -> tensor<i1> {
        %0 = "tf.some_op"(%arg0) : (tensor<i32>) -> tensor<i1>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 04 03:54:58 UTC 2022
    - 2.7K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op_test.cc

      quant_spec.set_quantization_component(
          QuantizationComponentSpec::COMPONENT_WEIGHT);
      quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8);
    
      std::optional<TF::PartitionedCallOp> dequantize_op = ApplyUniformQuantization(
          pattern_rewriter, cast<TF::ConstOp>(value.getDefiningOp()), quant_spec);
      EXPECT_TRUE(dequantize_op.has_value());
      EXPECT_EQ(dequantize_op.value().func().getName().str(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 3K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_inline_tpu_island.cc

      Operation *nested_module = symbol_table.lookup(kNestedModule);
      if (!nested_module) return;
    
      InlinerInterface inliner(&getContext());
      auto walk_result = getOperation().walk([&](TF::PartitionedCallOp call_op) {
        if (!call_op.getF().getRootReference().getValue().starts_with(
                kNestedModule))
          return WalkResult::advance();
        // This is a call we need to inline!
        LLVM_DEBUG(llvm::dbgs()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Dec 19 08:06:04 UTC 2023
    - 4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/common/lift_as_function_call.td

    ==============================================================================*/
    
    include "mlir/IR/OpBase.td"
    include "mlir/IR/PatternBase.td"
    include "mlir/Dialect/Func/IR/FuncOps.td"
    
    // Creates a function call with TF::PartitionedCallOp and a new function to
    // wrap the section between arguments and results.
    //
    // The `returns` number indicates the number of results the function returns.
    class LiftAsTFPartitionedCall<string func_name, int returns = 1> :
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 25 00:32:20 UTC 2024
    - 3.4K bytes
    - Viewed (0)
Back to top