Results 21 - 30 of 55 for getDefiningOp (0.37 sec)

  1. tensorflow/compiler/mlir/lite/quantization/tensorflow/tf_to_quant.cc

        DenseFPElementsAttr min_value, max_value;
        if (auto id1 = dyn_cast_or_null<TF::IdentityOp>(min.getDefiningOp())) {
          id1.replaceAllUsesWith(id1.getInput());
          min = tf_op.getMin();
          rewriter.eraseOp(id1);
        }
        if (auto id2 = dyn_cast_or_null<TF::IdentityOp>(max.getDefiningOp())) {
          id2.replaceAllUsesWith(id2.getInput());
          max = tf_op.getMax();
          rewriter.eraseOp(id2);
        }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.1K bytes
    - Viewed (0)
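
The file above removes the TF::IdentityOp it finds through getDefiningOp: uses are redirected to the identity's input and the op is erased via the rewriter. A minimal sketch of that shape, assuming only the standard PatternRewriter API; PassThroughOp is a hypothetical identity-like op with a getInput() accessor, and the replace-and-erase pair is folded into a single rewriter.replaceOp call here.

    #include "mlir/IR/PatternMatch.h"
    #include "mlir/IR/Value.h"

    // Hypothetical identity-like op exposing getInput(); illustration only.
    template <typename PassThroughOp>
    void erasePassThrough(mlir::Value v, mlir::PatternRewriter &rewriter) {
      // getDefiningOp<OpTy>() yields a null op for block arguments or for
      // producers of another kind, so the branch is simply skipped then.
      if (auto pass = v.getDefiningOp<PassThroughOp>()) {
        // Redirect every use of the pass-through result to its input and
        // erase the now-dead op, keeping the rewriter informed of the change.
        rewriter.replaceOp(pass, pass.getInput());
      }
    }
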
  2. tensorflow/compiler/mlir/lite/transforms/split_merged_operands.cc

        // case is useful for float16 quantization. Since all ops have been
        // legalized to tflite ops, we only care about ConstOp, QConstOp, or
        // mlir constant op.
        Operation* input_op = operand.getDefiningOp();
        if (input_op == nullptr) return failure();
    
        Attribute attr;
        if (matchPattern(input_op, m_Constant(&attr))) {
          // Constant case.
          builder->setInsertionPoint(op);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 5.9K bytes
    - Viewed (0)
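
Two properties of getDefiningOp carry this snippet: it returns the producing Operation*, and it returns nullptr when the operand is a block argument, which is why the failure() exit comes before the matchPattern call. A minimal sketch of the same test, assuming only the matcher helpers from mlir/IR/Matchers.h; the helper name is invented.

    #include "mlir/IR/Matchers.h"
    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"

    // Hypothetical helper: true (and `attr` filled in) when `v` is produced by
    // a constant-like op; false for block arguments and other producers.
    inline bool getConstantProducerAttr(mlir::Value v, mlir::Attribute &attr) {
      mlir::Operation *def = v.getDefiningOp();
      if (!def) return false;  // block argument: no producing op to match
      return mlir::matchPattern(def, mlir::m_Constant(&attr));
    }
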
  3. tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h

                      AttrType &max_value) const {
        Value min = tf_op.getMin(), max = tf_op.getMax();
        if (auto min_id = min.getDefiningOp<TF::IdentityOp>()) {
          min = min_id.getInput();
        }
        if (auto max_id = max.getDefiningOp<TF::IdentityOp>()) {
          max = max_id.getInput();
        }
    
        if (!matchPattern(min, m_Constant(&min_value))) {
          return false;
        }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.3K bytes
    - Viewed (0)
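
This header uses the tidier spelling of the look-through from result 1: the templated Value::getDefiningOp<OpTy>() folds the null check and the cast into one call, behaving like dyn_cast_or_null<OpTy>(value.getDefiningOp()). A minimal read-only sketch, with a hypothetical PassThroughOp (getInput() accessor assumed) standing in for TF::IdentityOp.

    #include "mlir/IR/Value.h"

    // Hypothetical identity-like op exposing getInput().
    template <typename PassThroughOp>
    mlir::Value lookThrough(mlir::Value v) {
      // Null op when v is a block argument or its producer is another kind.
      if (auto pass = v.getDefiningOp<PassThroughOp>())
        return pass.getInput();
      return v;  // nothing to look through
    }
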
  4. tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_utils.td

    #define COMPOSITE_UTILS_TD
    
    include "mlir/IR/PatternBase.td"
    
    // See the function doc in the header file.
    def GetNhwcReturnTypeFromNchw: NativeCodeCall<
      "GetNhwcReturnTypeFromNchw((*$0.begin()).getDefiningOp())">;
    
    // When given a DenseIntElementsAttr containing I64 elements, this extracts
    // one I32IntegerAttr from the given index.
    class GetI32At<int index>: NativeCodeCall<
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:05:30 UTC 2024
    - 2.7K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/transforms/quantize_variables.cc

        auto assign_variable_op =
            dyn_cast_or_null<AssignVariableOp>(var_handle_user);
        if (!assign_variable_op) continue;
        auto value_op = assign_variable_op.getValue().getDefiningOp();
        auto dq_op = dyn_cast_or_null<DequantizeOp>(value_op);
        if (!dq_op || ref_qtype) continue;
        ref_qtype = dq_op.getInput().getType();
      }
      return ref_qtype;
    }
    
    class QuantizeVariablesPass
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
    - Viewed (0)
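
Here getDefiningOp is paired with the opposite traversal: the pass walks the users of a variable handle, keeps the AssignVariableOp writes, and then hops from each written value back to its producer. A minimal sketch of that two-way def-use walk, with hypothetical WriteOp and ConstLikeOp placeholders (WriteOp is assumed to expose getValue()).

    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"
    #include "llvm/Support/Casting.h"

    // Hypothetical op kinds: WriteOp consumes the handle and exposes
    // getValue(); ConstLikeOp is the producer kind we are looking for.
    template <typename WriteOp, typename ConstLikeOp>
    mlir::Operation *findWrittenProducer(mlir::Value handle) {
      for (mlir::Operation *user : handle.getUsers()) {  // def -> uses
        auto write = llvm::dyn_cast<WriteOp>(user);
        if (!write) continue;
        mlir::Value written = write.getValue();
        // use -> def: hop from the written value back to its producer.
        if (auto producer = written.getDefiningOp<ConstLikeOp>())
          return producer.getOperation();
      }
      return nullptr;  // no matching write/producer pair found
    }
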
  6. tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.cc

      resource_infos_[resource].potentially_written = true;
      auto* operation = resource.getDefiningOp();
      if (operation && isa<TF::VarHandleOp>(operation)) {
        mutable_variables_.insert(GetResourceKey(operation));
      }
    }
    
    bool ResourceAnalyzer::IsPotentiallyWritten(Value resource) const {
      assert(IsResource(resource));
      auto* operation = resource.getDefiningOp();
      if (operation && isa<TF::VarHandleOp>(operation))
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 15 09:04:13 UTC 2024
    - 8K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize_layout.cc

        Value pad_input = pad_op.getOperand();
        RankedTensorType pad_type = pad_op.getType().cast<RankedTensorType>();
    
        auto transpose_op = pad_input.getDefiningOp<stablehlo::TransposeOp>();
        if (!transpose_op || !transpose_op->hasOneUse()) return failure();
        Value transpose_input = transpose_op.getOperand();
    
        ArrayRef<int64_t> transpose_perm = transpose_op.getPermutation();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 21:59:06 UTC 2024
    - 8.6K bytes
    - Viewed (0)
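
Besides the type check, this pattern guards on use count: getDefiningOp<stablehlo::TransposeOp>() finds the producer, and hasOneUse() confirms the pad is its only consumer, so folding the transpose away cannot change what any other user sees. A minimal sketch of that guard, with a hypothetical ProducerOp placeholder.

    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"

    // True when `operand` is produced by a ProducerOp whose result has exactly
    // one use, so the producer could be folded into its single consumer.
    template <typename ProducerOp>
    bool hasFoldableProducer(mlir::Value operand) {
      auto producer = operand.getDefiningOp<ProducerOp>();
      return producer && producer->hasOneUse();
    }
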
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/fold_constant_transpose.cc

      LogicalResult match(mlir::stablehlo::TransposeOp op) const override {
        Value operand = op.getOperand();
        auto const_op =
            dyn_cast_or_null<mlir::stablehlo::ConstantOp>(operand.getDefiningOp());
        if (!const_op) return failure();
    
        // Only support float tensors.
        auto tensor_type = mlir::dyn_cast_or_null<TensorType>(const_op.getType());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.7K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td

    def FuseAffineOpAndMul : Pat<
      (TF_MulOp
        (SupportedAffineOpMatcher $conv_out, $input, $weight),
        (TF_ConstOp:$mul_rhs IsFloatElementsAttr:$mul_rhs_value)),
      (CloneOpWithReplacedOperands
            (GetDefiningOp $conv_out),
            $input,
            (MultiplyFakeQuantValue $weight,
              (MakeOneDimValueBroadcastable $mul_rhs, $weight))),
      [(HasOneUse $conv_out),
       (HasRankOf<1> $mul_rhs_value),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 8.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_save_op.cc

        auto var_handle_op =
            dyn_cast<TF::VarHandleOp>(resource_operand.getDefiningOp());
        if (!var_handle_op) continue;
    
        Value assigned_value_operand = assign_variable_op.getOperand(1);
        auto const_op =
            dyn_cast<TF::ConstOp>(assigned_value_operand.getDefiningOp());
        if (!const_op) continue;
    
        var_handle_ops.emplace_back(var_handle_op);
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 9.5K bytes
    - Viewed (0)
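
Across these results two casting idioms surround getDefiningOp: results 1, 5, and 8 use dyn_cast_or_null, which tolerates the nullptr returned for block arguments, while this last file uses plain dyn_cast, which relies on the value being known to be an op result. A minimal side-by-side sketch (SomeOp is a placeholder op kind).

    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"
    #include "llvm/Support/Casting.h"

    // Placeholder op kind, for illustration only.
    template <typename SomeOp>
    void castProducerBothWays(mlir::Value v) {
      // Null-tolerant: yields a null SomeOp for block arguments as well as
      // for producers of a different kind.
      SomeOp tolerant = llvm::dyn_cast_or_null<SomeOp>(v.getDefiningOp());
      // Plain dyn_cast asserts on a null pointer, so this form is only safe
      // when v is known to be an op result rather than a block argument.
      SomeOp direct = llvm::dyn_cast<SomeOp>(v.getDefiningOp());
      (void)tolerant;
      (void)direct;
    }
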