Results 71 - 80 of 158 for getOperands (0.31 sec)

  1. tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_sharding_identification_pass.cc

      //
      // Sharding configurations are added to the tf_device.ClusterFunc as an
      // attribute and the function as an argument attribute.
      for (auto operand_and_arg :
           llvm::zip(cluster_func.getOperands(), function_block.getArguments())) {
        Value operand = std::get<0>(operand_and_arg);
        BlockArgument arg = std::get<1>(operand_and_arg);
    
        if (auto operand_sharding = GetXlaShardingFromOperand(operand)) {

    - Last Modified: Tue Apr 30 02:01:13 UTC 2024
    - 28.9K bytes
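
    The idiom in this snippet pairs a call-like op's operands with the callee
    block's arguments in lockstep. A minimal sketch with a hypothetical helper
    name (note that llvm::zip stops at the shorter of the two ranges):

      #include "llvm/ADT/STLExtras.h"
      #include "mlir/IR/Block.h"
      #include "mlir/IR/Operation.h"

      // Hypothetical helper: visit each (call-site operand, callee argument)
      // pair, as the pass above does for tf_device.ClusterFunc.
      void ForEachOperandArgPair(mlir::Operation* op, mlir::Block& callee_block) {
        for (auto [operand, arg] :
             llvm::zip(op->getOperands(), callee_block.getArguments())) {
          // `operand` is the mlir::Value passed at the call site; `arg` is the
          // matching mlir::BlockArgument inside the callee body.
          (void)operand;
          (void)arg;
        }
      }
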
  2. tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h

    // type if provided. If there is no operand or operand of type, return nullptr.
    template <typename T = Operation*>
    Operation* FindOperandOfType(Operation* op) {
      for (Value operand_value : op->getOperands()) {
        if (isa<T>(operand_value.getDefiningOp())) {
          return operand_value.getDefiningOp();
        }
      }
      return nullptr;
    }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.9K bytes
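
    One caveat with this search: getDefiningOp() returns nullptr when the
    operand is a block argument. A null-guarded variant of the same loop
    (hypothetical name, not the library's API):

      #include "mlir/IR/Operation.h"
      #include "mlir/IR/Value.h"

      // Hypothetical variant: skip operands that are block arguments, which
      // have no defining op.
      template <typename T>
      mlir::Operation* FindOperandDefinerOfType(mlir::Operation* op) {
        for (mlir::Value operand_value : op->getOperands()) {
          mlir::Operation* def = operand_value.getDefiningOp();
          if (def != nullptr && mlir::isa<T>(def)) return def;
        }
        return nullptr;
      }
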
  3. tensorflow/compiler/mlir/lite/utils/utils.h

    using mlir::Operation;
    using mlir::ShapedType;
    using mlir::Value;
    
    // Returns true if every tensor value in `values` has a static shape and all shapes match.
    inline bool OpHasSameStaticShapes(Operation* op) {
      auto values = op->getOperands();
      int operand_num = 0;
      ArrayRef<int64_t> shape;
      for (Value value : values) {
        auto shaped_type = value.getType().dyn_cast<ShapedType>();
        if (!shaped_type || !shaped_type.hasStaticShape()) {

    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 11.6K bytes
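
    The snippet cuts off mid-loop. A self-contained sketch of the same check,
    reconstructed from the comment above rather than the file's actual
    continuation:

      #include "mlir/IR/BuiltinTypes.h"
      #include "mlir/IR/Operation.h"

      // Sketch: true iff every operand is a statically shaped tensor and all
      // operands share one shape.
      inline bool AllOperandsShareOneStaticShape(mlir::Operation* op) {
        llvm::ArrayRef<int64_t> shape;
        bool seen_first = false;
        for (mlir::Value value : op->getOperands()) {
          auto shaped_type = mlir::dyn_cast<mlir::ShapedType>(value.getType());
          if (!shaped_type || !shaped_type.hasStaticShape()) return false;
          if (!seen_first) {
            shape = shaped_type.getShape();
            seen_first = true;
          } else if (shaped_type.getShape() != shape) {
            return false;
          }
        }
        return true;
      }
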
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc

                 llvm::Twine(calib_opts_.calibration_method()))
                    .str());
          }
        } else {
      // Quantize the outputs of fully quantizable composite functions.
          for (Value input : op->getOperands()) {
            auto defining_op = input.getDefiningOp();
            std::optional<StringRef> composite_function_name =
                GetCompsiteFunctionName(defining_op);
            if (!composite_function_name.has_value()) continue;

    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 14.8K bytes
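
    A dialect-neutral sketch of the surrounding loop, with GetCompsiteFunctionName
    replaced by a hypothetical attribute lookup (the "marker" name is invented
    for illustration):

      #include <optional>

      #include "mlir/IR/Operation.h"

      // Hypothetical lookup: nullopt for block arguments or unmarked ops.
      std::optional<llvm::StringRef> GetMarker(mlir::Operation* def) {
        if (def == nullptr) return std::nullopt;
        if (auto attr = def->getAttrOfType<mlir::StringAttr>("marker"))
          return attr.getValue();
        return std::nullopt;
      }

      void VisitMarkedDefiners(mlir::Operation* op) {
        for (mlir::Value input : op->getOperands()) {
          std::optional<llvm::StringRef> name = GetMarker(input.getDefiningOp());
          if (!name.has_value()) continue;
          // ... process the marked defining op here ...
        }
      }
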
  5. tensorflow/compiler/mlir/tensorflow/transforms/prepare_tpu_computation_for_tf_export.cc

          }
    
          rewriter.setInsertionPoint(cloned_func.getBody().front().getTerminator());
          rewriter.create<TF::_XlaSendFromHostOp>(
              func.getLoc(),
              cloned_func.getBody().front().getTerminator()->getOperands(),
              /*dynamic_key=*/dynamic_key, op.getRecvKeyAttr(),
              /*device_ordinal=*/rewriter.getI64IntegerAttr(0),
              rewriter.getStringAttr("TPU"));
        }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.8K bytes
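
    The idiom here is forwarding a region terminator's operands as the inputs
    of a newly built op. A dialect-neutral sketch (the TF op itself is elided):

      #include "mlir/IR/Block.h"
      #include "mlir/IR/Builders.h"
      #include "mlir/IR/Operation.h"

      // Sketch: position the builder just before the body terminator and hand
      // back the terminator's operands (a view, valid while the op lives) so
      // they can be forwarded to the next create<>, as the pass above does for
      // TF::_XlaSendFromHostOp.
      mlir::ValueRange ReturnedValues(mlir::OpBuilder& builder, mlir::Block& body) {
        mlir::Operation* terminator = body.getTerminator();
        builder.setInsertionPoint(terminator);
        return terminator->getOperands();
      }
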
  6. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc

    // The current implementation is based on the number of operands.
    static bool PreferResultScale(Operation* op) {
      int float_operands = 0;
      for (auto operand : op->getOperands()) {
        if (auto operand_type = dyn_cast<ShapedType>(operand.getType())) {
          if (isa<FloatType>(operand_type.getElementType())) {
            if (++float_operands > 1) return true;
          }
        }
      }

    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 43.2K bytes
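
    The snippet ends before the fall-through. A completed sketch, assuming the
    heuristic returns false when fewer than two float operands are found:

      #include "mlir/IR/BuiltinTypes.h"
      #include "mlir/IR/Operation.h"

      // Sketch: true once a second float-element tensor operand is seen.
      static bool HasMultipleFloatOperands(mlir::Operation* op) {
        int float_operands = 0;
        for (mlir::Value operand : op->getOperands()) {
          auto operand_type = mlir::dyn_cast<mlir::ShapedType>(operand.getType());
          if (operand_type &&
              mlir::isa<mlir::FloatType>(operand_type.getElementType())) {
            if (++float_operands > 1) return true;
          }
        }
        return false;
      }
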
  7. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types.cc

      // UniformQuantized ops are considered legal if their qint operands and
      // results are connected to TF CastOps.
      return op && llvm::all_of(op->getResults(), IsQintValueQintToIntCast) &&
             llvm::all_of(op->getOperands(), IsQintValueDefinedByIntToQintCast);
    }
    
    bool IsCastOpLegal(TF::CastOp cast_op) {
      // Consider qint <-> qint casts illegal.
      if (IsIllegalType(cast_op.getSrcT()) && IsIllegalType(cast_op.getDstT())) {

    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.6K bytes
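
    A generic sketch of the llvm::all_of idiom used here: test one predicate
    across both the operands and the results of an op (the predicate is a
    hypothetical stand-in for the pass's qint/cast checks):

      #include "llvm/ADT/STLExtras.h"
      #include "mlir/IR/Operation.h"

      // Sketch: an op is "legal" iff every operand and every result satisfies
      // the supplied predicate.
      bool AllValuesSatisfy(mlir::Operation* op,
                            llvm::function_ref<bool(mlir::Value)> pred) {
        return op != nullptr && llvm::all_of(op->getOperands(), pred) &&
               llvm::all_of(op->getResults(), pred);
      }
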
  8. tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_ops.cc

      std::string custom_option_buffer;
      CreateFlexOpCustomOptions(op_name, node_def_str, custom_option_buffer);
      auto flex_op = builder.create<TFL::CustomOp>(
          op->getLoc(), op->getResultTypes(), op->getOperands(), flex_op_name,
          CustomOptionForFlexOp(&builder, custom_option_buffer));
      op->replaceAllUsesWith(flex_op);
      op->erase();
      return true;
    }
    
    // Sets the "no_fallback" attribute.

    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.2K bytes
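
    A generic sketch of the replace-and-erase idiom shown above, for any op
    type whose builder accepts result types plus operands (the CustomOp-specific
    arguments are elided):

      #include "mlir/IR/Builders.h"
      #include "mlir/IR/Operation.h"

      // Sketch: build a replacement reusing the old op's result types and
      // operands, redirect all uses to it, then erase the original.
      template <typename OpTy>
      void ReplaceWithForwardedOperands(mlir::OpBuilder& builder,
                                        mlir::Operation* op) {
        builder.setInsertionPoint(op);
        auto new_op = builder.create<OpTy>(op->getLoc(), op->getResultTypes(),
                                           op->getOperands());
        op->replaceAllUsesWith(new_op);
        op->erase();
      }
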
  9. tensorflow/compiler/mlir/tf2xla/internal/passes/xla_broadcast.cc

      if (num_cores_per_replica != 1) return success();
    
      llvm::SetVector<Value> bcasts;
      cluster->walk([&](Operation* op) {
        if (op == cluster) return WalkResult::advance();
        for (auto operand : op->getOperands()) {
          Operation* scope = operand.getParentBlock()->getParentOp();
          if (scope->isProperAncestor(replicate)) {
            bcasts.insert(operand);
          }
        }
        return WalkResult::advance();
      });

    - Last Modified: Thu Jun 13 18:52:07 UTC 2024
    - 13.9K bytes
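
    A self-contained sketch of the capture-collection idiom (hypothetical name):
    gather every operand used inside an op's regions but defined in a scope that
    properly contains it. For the region-based form of this query, MLIR also
    provides getUsedValuesDefinedAbove in mlir/Transforms/RegionUtils.h.

      #include "llvm/ADT/SetVector.h"
      #include "mlir/IR/Operation.h"

      // Sketch: values flowing into `region_op` from enclosing scopes.
      llvm::SetVector<mlir::Value> CollectCapturedOperands(
          mlir::Operation* region_op) {
        llvm::SetVector<mlir::Value> captured;
        region_op->walk([&](mlir::Operation* op) {
          if (op == region_op) return;  // walk also visits the root op
          for (mlir::Value operand : op->getOperands()) {
            mlir::Operation* scope = operand.getParentBlock()->getParentOp();
            if (scope->isProperAncestor(region_op)) captured.insert(operand);
          }
        });
        return captured;
      }
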
  10. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc

        return RefineTypeForPassThroughOperands(
            op, iter_sink.getOperands().drop_front().take_front(),
            iter_source.getResults());
      }
      if (auto launch_op = dyn_cast<tf_device::LaunchOp>(op)) {
        auto terminator = launch_op.GetBody().getTerminator();
        return RefineTypeForPassThroughOperands(op, terminator->getOperands(),
                                                op->getResults());
      }

    - Last Modified: Sat Jun 08 07:28:49 UTC 2024
    - 134.1K bytes
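
    A sketch of the pass-through pairing this code relies on: a region op's
    results correspond one-for-one to its body terminator's operands, so a
    shape-inference pass can propagate types across the region boundary (the
    actual refinement call is elided):

      #include "llvm/ADT/STLExtras.h"
      #include "mlir/IR/Block.h"
      #include "mlir/IR/Operation.h"

      // Sketch: visit (terminator operand, op result) pairs, assuming the 1:1
      // layout that tf_device.LaunchOp has above.
      void ForEachPassThroughPair(mlir::Operation* op, mlir::Block& body) {
        for (auto [inner, outer] :
             llvm::zip(body.getTerminator()->getOperands(), op->getResults())) {
          // A refinement pass would propagate inner.getType() onto outer here.
          (void)inner;
          (void)outer;
        }
      }
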