Results 41 - 50 of 178 for getDefiningOp (0.27 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.cc

        mlir::TF::AssignVariableOp assign_var_op, BundleWriter& bundle_writer) {
      auto resource_operand = assign_var_op.getOperand(0);
      auto var_handle_op =
          llvm::dyn_cast<mlir::TF::VarHandleOp>(resource_operand.getDefiningOp());
      if (!var_handle_op) {
        assign_var_op->emitRemark(
            "Operand idx 0 is not a tf.VarHandleOp. The initializing tensor is not "
            "saved to checkpoint.");
        return "";
      }
    
    - Last Modified: Mon Feb 26 03:36:55 UTC 2024
    - 4.8K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

                                 Value multiplier) {
      auto dq_op = value.getDefiningOp<quantfork::DequantizeCastOp>();
      if (!dq_op) {
        auto mul_op = builder.create<TF::MulOp>(loc, value, multiplier);
        return mul_op.getResult();
      }
      auto q_op = dq_op.getArg().getDefiningOp<quantfork::QuantizeCastOp>();
      if (!q_op) return {};
    
      Value float_value = q_op.getArg();
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes
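
    A note on results 1 and 2: result 1 uses the untyped Value::getDefiningOp() followed by llvm::dyn_cast, while result 2 uses the templated getDefiningOp<OpTy>(), which folds the cast and the null check (a block argument has no defining op) into a single call. A minimal sketch of the idiom, using the upstream arith dialect rather than the TF/quantization ops above; the helper name is illustrative:

        #include "mlir/Dialect/Arith/IR/Arith.h"
        #include "mlir/IR/Value.h"

        // Returns the attribute held by the arith.constant feeding `value`, or a
        // null attribute if `value` is a block argument or defined by another op.
        static mlir::Attribute getConstantInput(mlir::Value value) {
          // Typed form: performs dyn_cast_or_null<arith::ConstantOp> internally.
          if (auto const_op = value.getDefiningOp<mlir::arith::ConstantOp>())
            return const_op.getValue();
          // Untyped form, for comparison:
          //   llvm::dyn_cast_or_null<mlir::arith::ConstantOp>(value.getDefiningOp())
          return {};
        }
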
  3. tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc

        // Operand must be defined by a transpose op.
        TransposeOp transpose =
            dyn_cast_or_null<TransposeOp>(operand.get().getDefiningOp());
        if (!transpose) return;
    
        // With permutation defined by constant operation.
        ConstOp perm =
            dyn_cast_or_null<ConstOp>(transpose.getOperand(1).getDefiningOp());
        if (!perm) return;
    
        // With the same permutation indices.
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 19.3K bytes
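
    A note on result 3: both hops use dyn_cast_or_null because getDefiningOp() returns nullptr for block arguments. For the second hop, whose only purpose is to read a constant permutation, MLIR's matcher API is an equivalent spelling that hides the null handling. A hedged sketch; the function and variable names are illustrative, not from the pass:

        #include "mlir/IR/BuiltinAttributes.h"
        #include "mlir/IR/Matchers.h"
        #include "mlir/IR/Operation.h"

        // Extracts the permutation constant feeding the second operand of a
        // transpose-like op. Returns false if that operand is not a constant.
        static bool getPermutation(mlir::Operation *transpose_like,
                                   mlir::DenseIntElementsAttr &perm) {
          if (transpose_like->getNumOperands() < 2) return false;
          // matchPattern calls getDefiningOp() internally and tolerates nulls.
          return mlir::matchPattern(transpose_like->getOperand(1),
                                    mlir::m_Constant(&perm));
        }
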
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc

        for (const auto& operand : same_scale_op->getOperands()) {
          auto dq_op = dyn_cast_or_null<quantfork::DequantizeCastOp>(
              operand.getDefiningOp());
          if (!dq_op) continue;
    
          Operation* preceding_op = dq_op.getArg().getDefiningOp();
          if (!preceding_op) continue;
    
          // Check whether the preceding op is a quantized composite function.
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 23.6K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.cc

      LogicalResult matchAndRewrite(quantfork::DequantizeCastOp op,
                                    PatternRewriter& rewriter) const override {
        auto input_op = op.getArg().getDefiningOp();
        if (auto q = llvm::dyn_cast_or_null<quantfork::QuantizeCastOp>(input_op)) {
          if (!q->getAttr(kVolatileOpAttrName)) return failure();
    
          if (remove_volatile_ops_type == kPreserveInputsAndOutputs) {
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 5.6K bytes
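
    A note on result 5: a matchAndRewrite that inspects op.getArg().getDefiningOp() is the standard way to write a pattern that looks one step up the def-use chain. A self-contained sketch of the same shape using upstream arith ops; the fold itself is hypothetical and unrelated to the quantization pass:

        #include "mlir/Dialect/Arith/IR/Arith.h"
        #include "mlir/IR/PatternMatch.h"

        // Rewrites arith.trunci(arith.extsi(x)) to x when the types round-trip.
        struct FoldTruncOfExt : mlir::OpRewritePattern<mlir::arith::TruncIOp> {
          using OpRewritePattern::OpRewritePattern;

          mlir::LogicalResult
          matchAndRewrite(mlir::arith::TruncIOp op,
                          mlir::PatternRewriter &rewriter) const override {
            // Look one step up the def-use chain, as the pass above does.
            auto ext = op.getIn().getDefiningOp<mlir::arith::ExtSIOp>();
            if (!ext) return mlir::failure();
            if (ext.getIn().getType() != op.getType()) return mlir::failure();
            rewriter.replaceOp(op, ext.getIn());
            return mlir::success();
          }
        };
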
  6. tensorflow/compiler/mlir/lite/stablehlo/odml_converter/transforms/shlo_simplify.td

    def CloneF32ElementsAttrWithOnes
      : NativeCodeCall<"DenseElementsAttr::get($0.getType().cast<ShapedType>(), (float)1.0)">;
    
    def NotConstant : Constraint<
        CPred<"$0.isa<BlockArgument>() || !llvm::isa<stablehlo::ConstantOp>($0.getDefiningOp())">,
        "Is not a constant.">;
    
    def : Pat<(StableHLO_DivOp $l,
                (StableHLO_ConstantOp:$divisor FloatElementsAttr<32>:$cst)),
              (StableHLO_MulOp $l,
                (StableHLO_DivOp
    - Last Modified: Fri May 10 03:05:20 UTC 2024
    - 1.4K bytes
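
    A note on result 6: the order of the two checks in the CPred matters, because a BlockArgument has no defining op and getDefiningOp() returns null for it. The constraint expands to C++ roughly as in the sketch below; the StableHLO include path is an assumption about the current repository layout:

        #include "mlir/IR/Value.h"
        #include "stablehlo/dialect/StablehloOps.h"  // assumed include path

        // Mirrors the NotConstant constraint: true when `value` is not produced
        // by a stablehlo.constant. The BlockArgument check comes first so the
        // isa<> below never sees a null defining op.
        static bool isNotConstant(mlir::Value value) {
          return mlir::isa<mlir::BlockArgument>(value) ||
                 !llvm::isa<mlir::stablehlo::ConstantOp>(value.getDefiningOp());
        }
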
  7. tensorflow/compiler/mlir/lite/stablehlo/transforms/tflite_legalize_hlo_patterns.td

                                (CreateTFLCastToInt32Op (TFL_ConstOp $perm)))>;
    
    
    def ConvertDotGeneralOp : NativeCodeCall<"ConvertDotGeneralOp($_builder, "
                                                   "$0.getDefiningOp())">;
    def LegalizeDotGeneral: Pat<(MHLO_DotGeneralOp:$old_value
                   $lhs,
                   $rhs,
                   $dot_dimension_numbers, $precision_config),
    - Last Modified: Wed Oct 18 18:07:41 UTC 2023
    - 1.6K bytes
  8. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc

      Operation* xla_call_module_op =
          FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
      Operation* filter_dcast_op =
          xla_call_module_op->getOperand(1).getDefiningOp();
      Operation* filter_qcast_op = filter_dcast_op->getOperand(0).getDefiningOp();
      ASSERT_NE(filter_qcast_op, nullptr);
      EXPECT_TRUE(isa<quantfork::QuantizeCastOp>(filter_qcast_op));
      EXPECT_TRUE(isa<quantfork::DequantizeCastOp>(filter_dcast_op));
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.9K bytes
  9. tensorflow/compiler/mlir/tensorflow/transforms/tpu_annotate_dynamic_shape_inputs.cc

    // returns the owner of the Block.
    Operation* GetOpOfValue(Value value) {
      if (auto block_arg = mlir::dyn_cast<BlockArgument>(value))
        return block_arg.getOwner()->getParentOp();
    
      return value.getDefiningOp();
    }
    
    void TPUAnnotateDynamicShapeInputsPass::runOnOperation() {
      getOperation().walk([&](tf_device::ClusterFuncOp cluster_func_op) {
        Builder builder(cluster_func_op->getContext());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.2K bytes
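
    A note on result 9: GetOpOfValue answers "which op provides this value" uniformly, since getDefiningOp() alone returns null for block arguments. A hedged usage sketch; the collecting function is illustrative, not part of the pass:

        #include "llvm/ADT/SmallVector.h"
        #include "mlir/IR/Operation.h"
        #include "mlir/IR/Value.h"

        // For every operand of `op`, records the operation that provides it: the
        // defining op for op results, or the parent op of the owning block for
        // block arguments (the same logic as GetOpOfValue above).
        static llvm::SmallVector<mlir::Operation *>
        getProvidingOps(mlir::Operation *op) {
          llvm::SmallVector<mlir::Operation *> providers;
          for (mlir::Value operand : op->getOperands()) {
            if (auto block_arg = mlir::dyn_cast<mlir::BlockArgument>(operand))
              providers.push_back(block_arg.getOwner()->getParentOp());
            else
              providers.push_back(operand.getDefiningOp());
          }
          return providers;
        }
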
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tpu_model_to_cpu.td

    // Combines the two variadic arguments ($in_tensors and $captured_tensors).
    def GetBatchFunctionOpArgOperands:
        NativeCodeCall<"cast<TF::BatchFunctionOp>($0[0].getDefiningOp()).getArgOperands()">;
    
    // Replaces `TF_BatchFunctionOp` into `TF_PartitionedCallOp` that calls the
    // same $f. This may be required, for example, when inlining is desired,
    - Last Modified: Tue Apr 02 18:58:35 UTC 2024
    - 1.6K bytes
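
    A note on result 10: this one uses the asserting cast<> rather than dyn_cast, which is safe only because the enclosing DRR pattern has already matched a tf.BatchFunction op producing $0[0]. A hedged sketch of the same convention against an upstream op; the helper and its precondition are illustrative:

        #include "mlir/Dialect/Func/IR/FuncOps.h"
        #include "mlir/IR/Value.h"

        // Precondition: `result` is known to be produced by a func.call op (for
        // example, because a pattern already matched it); cast<> asserts otherwise.
        static mlir::FlatSymbolRefAttr getCalleeOf(mlir::Value result) {
          auto call = llvm::cast<mlir::func::CallOp>(result.getDefiningOp());
          return call.getCalleeAttr();
        }
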