
Results 11 - 20 of 55 for getDefiningOp (0.43 sec)

  1. tensorflow/compiler/mlir/tensorflow/analysis/resource_dataflow.cc

          ResourceConstructingOps result(global_tensor);
          return result;
        }
      } else if (auto vh = dyn_cast<TF::VarHandleOp>(value.getDefiningOp())) {
        return ResourceConstructingOps(vh);
      } else if (auto it = dyn_cast<TF::IteratorOp>(value.getDefiningOp())) {
        return ResourceConstructingOps(it);
      }
      return ResourceConstructingOps();
    }
    
    ResourceConstructingOps ResourceConstructingOps::join(
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.2K bytes
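    The excerpt above dispatches on the producer of a resource value via
    dyn_cast<TF::VarHandleOp>(value.getDefiningOp()), which assumes the value has a
    defining op at that point. Value::getDefiningOp() returns nullptr for block
    arguments, and dyn_cast_or_null is the usual escape hatch when that case cannot
    be ruled out. A minimal sketch of the null-safe dispatch (the ClassifyResource
    helper and ResourceKind enum are hypothetical, not part of the file above):

        #include "llvm/Support/Casting.h"
        #include "mlir/IR/Value.h"  // from @llvm-project
        #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

        enum class ResourceKind { kUnknown, kVarHandle, kIterator };

        // Hypothetical helper: classify a resource-typed Value by its producer.
        // dyn_cast_or_null tolerates the nullptr returned for block arguments.
        ResourceKind ClassifyResource(mlir::Value value) {
          if (llvm::dyn_cast_or_null<mlir::TF::VarHandleOp>(value.getDefiningOp()))
            return ResourceKind::kVarHandle;
          if (llvm::dyn_cast_or_null<mlir::TF::IteratorOp>(value.getDefiningOp()))
            return ResourceKind::kIterator;
          return ResourceKind::kUnknown;
        }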
  2. tensorflow/compiler/mlir/tensorflow/transforms/lower_globals_to_ml_program.cc

      }
    
      if (v.getDefiningOp()->getNumOperands() == 1) {
        // If the value is originating from an unary op, assume it's something
        // simple like "cast" and keep tracing.
        return traceUpwardsToArgument(v.getDefiningOp()->getOperand(0), seen, out);
      } else {
        // Typically a tf.VarHandle op.
        return v.getDefiningOp()->emitOpError("Non constant predecessor");
      }
    }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.6K bytes
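    The function above keeps tracing through single-operand producers (assumed to be
    simple forwarding ops such as casts) and reports an error for anything else. A
    simplified sketch of that upward walk (TraceThroughUnaryOps is a hypothetical
    name; the real traceUpwardsToArgument also threads the `seen` and `out`
    parameters visible in the recursive call):

        #include "mlir/IR/Operation.h"
        #include "mlir/IR/Value.h"  // from @llvm-project

        // Hypothetical helper: follow defining ops through unary producers until
        // the value has no defining op (a block argument) or the producer takes
        // a different number of operands (e.g. a tf.VarHandleOp takes none).
        mlir::Value TraceThroughUnaryOps(mlir::Value v) {
          while (mlir::Operation* def = v.getDefiningOp()) {
            if (def->getNumOperands() != 1) break;
            v = def->getOperand(0);  // assume cast-like ops just forward the value
          }
          return v;
        }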
  3. tensorflow/compiler/mlir/lite/stablehlo/odml_converter/folders.cc

      static std::optional<FoldAdaptor> Create(Operation* operation) {
        auto foldable_opr = [](Value val) -> bool {
          return !llvm::isa<BlockArgument>(val) &&
                 llvm::isa<stablehlo::ConstantOp>(val.getDefiningOp());
        };
        if (!llvm::all_of(operation->getOperands(), foldable_opr)) {
          return std::nullopt;
        }
        return FoldAdaptor(operation);
      }
    
    - Last Modified: Wed May 08 06:11:55 UTC 2024
    - 4.5K bytes
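    Here getDefiningOp() backs a foldability test: an operand qualifies only if it
    is not a block argument and its producer is a stablehlo::ConstantOp, with the
    BlockArgument check short-circuiting so getDefiningOp() is never consulted on a
    value that has no defining op. An equivalent standalone predicate, sketched with
    MLIR's generic constant matcher rather than naming the op (m_Constant matches
    any ConstantLike op, which includes stablehlo::ConstantOp, and matchPattern
    already returns false for block arguments):

        #include "llvm/ADT/STLExtras.h"
        #include "mlir/IR/Matchers.h"
        #include "mlir/IR/Operation.h"  // from @llvm-project

        // Sketch: true iff every operand of `op` is produced by a constant op.
        bool AllOperandsAreConstants(mlir::Operation* op) {
          return llvm::all_of(op->getOperands(), [](mlir::Value val) {
            return mlir::matchPattern(val, mlir::m_Constant());
          });
        }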
  4. tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.cc

        mlir::TF::AssignVariableOp assign_var_op, BundleWriter& bundle_writer) {
      auto resource_operand = assign_var_op.getOperand(0);
      auto var_handle_op =
          llvm::dyn_cast<mlir::TF::VarHandleOp>(resource_operand.getDefiningOp());
      if (!var_handle_op) {
        assign_var_op->emitRemark(
            "Operand idx 0 is not a tf.VarHandleOp. The initializing tensor is not "
            "saved to checkpoint.");
        return "";
      }
    
    - Last Modified: Mon Feb 26 03:36:55 UTC 2024
    - 4.8K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.cc

      LogicalResult matchAndRewrite(quantfork::DequantizeCastOp op,
                                    PatternRewriter& rewriter) const override {
        auto input_op = op.getArg().getDefiningOp();
        if (auto q = llvm::dyn_cast_or_null<quantfork::QuantizeCastOp>(input_op)) {
          if (!q->getAttr(kVolatileOpAttrName)) return failure();
    
          if (remove_volatile_ops_type == kPreserveInputsAndOutputs) {
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 5.6K bytes
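    This is the dyn_cast_or_null form of the same idiom: op.getArg() may be a block
    argument, in which case getDefiningOp() returns nullptr and the pattern simply
    fails to match instead of crashing. A stripped-down pattern skeleton
    illustrating the shape (FoldProducerConsumer is a generic placeholder, not the
    quantfork pattern above, and it assumes a single-operand, type-preserving
    producer/consumer pair):

        #include "llvm/Support/Casting.h"
        #include "mlir/IR/PatternMatch.h"  // from @llvm-project

        // Sketch: bypass a type-preserving producer/consumer pair such as
        // quantize -> dequantize.
        template <typename ConsumerOp, typename ProducerOp>
        struct FoldProducerConsumer : public mlir::OpRewritePattern<ConsumerOp> {
          using mlir::OpRewritePattern<ConsumerOp>::OpRewritePattern;

          mlir::LogicalResult matchAndRewrite(
              ConsumerOp op, mlir::PatternRewriter& rewriter) const override {
            // getDefiningOp() is nullptr for block arguments; dyn_cast_or_null
            // turns that into a failed match rather than an assertion.
            auto producer = llvm::dyn_cast_or_null<ProducerOp>(
                op->getOperand(0).getDefiningOp());
            if (!producer) return mlir::failure();
            // Replace the consumer with the producer's input (assumes the pair
            // round-trips the type).
            rewriter.replaceOp(op, producer->getOperand(0));
            return mlir::success();
          }
        };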
  6. tensorflow/compiler/mlir/lite/stablehlo/odml_converter/transforms/shlo_simplify.td

    def CloneF32ElementsAttrWithOnes
      : NativeCodeCall<"DenseElementsAttr::get($0.getType().cast<ShapedType>(), (float)1.0)">;
    
    def NotConstant : Constraint<
        CPred<"$0.isa<BlockArgument>() || !llvm::isa<stablehlo::ConstantOp>($0.getDefiningOp())">,
        "Is not a constant.">;
    
    def : Pat<(StableHLO_DivOp $l,
                (StableHLO_ConstantOp:$divisor FloatElementsAttr<32>:$cst)),
              (StableHLO_MulOp $l,
                (StableHLO_DivOp
    - Last Modified: Fri May 10 03:05:20 UTC 2024
    - 1.4K bytes
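    The NotConstant constraint is the logical negation of the foldable_opr check in
    folders.cc above, and its || short-circuit plays the same role as the && there:
    a block argument has no defining op, so it is classified as "not a constant"
    before $0.getDefiningOp() is ever evaluated. Expanded into a plain C++ predicate
    for clarity (IsNotStablehloConstant is a hypothetical name):

        #include "llvm/Support/Casting.h"
        #include "mlir/IR/Value.h"  // from @llvm-project
        #include "stablehlo/dialect/StablehloOps.h"

        // Sketch: C++ equivalent of the NotConstant CPred above.
        bool IsNotStablehloConstant(mlir::Value v) {
          return llvm::isa<mlir::BlockArgument>(v) ||
                 !llvm::isa<mlir::stablehlo::ConstantOp>(v.getDefiningOp());
        }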
  7. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc

      Operation* xla_call_module_op =
          FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
      Operation* filter_dcast_op =
          xla_call_module_op->getOperand(1).getDefiningOp();
      Operation* filter_qcast_op = filter_dcast_op->getOperand(0).getDefiningOp();
      ASSERT_NE(filter_qcast_op, nullptr);
      EXPECT_TRUE(isa<quantfork::QuantizeCastOp>(filter_qcast_op));
      EXPECT_TRUE(isa<quantfork::DequantizeCastOp>(filter_dcast_op));
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.9K bytes
  8. tensorflow/compiler/mlir/tensorflow/transforms/tpu_annotate_dynamic_shape_inputs.cc

    // returns the owner of the Block.
    Operation* GetOpOfValue(Value value) {
      if (auto block_arg = mlir::dyn_cast<BlockArgument>(value))
        return block_arg.getOwner()->getParentOp();
    
      return value.getDefiningOp();
    }
    
    void TPUAnnotateDynamicShapeInputsPass::runOnOperation() {
      getOperation().walk([&](tf_device::ClusterFuncOp cluster_func_op) {
        Builder builder(cluster_func_op->getContext());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.2K bytes
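    GetOpOfValue maps a result to its defining op and a block argument to the op
    that owns its block, so callers get an owning op for any Value without
    branching. A small usage sketch, assuming the GetOpOfValue helper above is in
    scope (the reporting loop itself is illustrative only):

        #include "mlir/IR/Operation.h"
        #include "mlir/IR/Value.h"  // from @llvm-project

        // Sketch: report where each operand of `op` comes from, without having
        // to special-case block arguments at every call site.
        void ReportOperandOwners(mlir::Operation* op) {
          for (mlir::Value operand : op->getOperands()) {
            mlir::Operation* owner = GetOpOfValue(operand);
            owner->emitRemark() << "produces an operand of "
                                << op->getName().getStringRef();
          }
        }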
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tpu_model_to_cpu.td

    // Combines the two variadic arguments ($in_tensors and $captured_tensors).
    def GetBatchFunctionOpArgOperands:
        NativeCodeCall<"cast<TF::BatchFunctionOp>($0[0].getDefiningOp()).getArgOperands()">;
    
    // Replaces `TF_BatchFunctionOp` into `TF_PartitionedCallOp` that calls the
    // same $f. This may be required, for example, when inlining is desired,
    - Last Modified: Tue Apr 02 18:58:35 UTC 2024
    - 1.6K bytes
  10. tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.cc

    int64_t GetTransferredTensorBytes(func::CallOp from_graph,
                                      func::CallOp to_graph) {
      int64_t total_size_transferred = 0;
      for (auto input : to_graph.getOperands()) {
        Operation* input_op = input.getDefiningOp();
        if (input_op && input_op == from_graph.getOperation()) {
          auto input_type =
              mlir::dyn_cast_or_null<RankedTensorType>(input.getType());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.3K bytes
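    The explicit null check before the pointer comparison documents that operands of
    to_graph which are block arguments (no defining op) are skipped rather than
    counted. A condensed sketch of the same producer-identity test
    (CountCrossEdgeOperands is a hypothetical name):

        #include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project

        // Sketch: count how many operands of `to_graph` are produced directly by
        // `from_graph`, i.e. the values that flow across that call edge.
        int CountCrossEdgeOperands(mlir::func::CallOp from_graph,
                                   mlir::func::CallOp to_graph) {
          int count = 0;
          for (mlir::Value input : to_graph.getOperands()) {
            mlir::Operation* producer = input.getDefiningOp();
            if (producer && producer == from_graph.getOperation()) ++count;
          }
          return count;
        }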