Results 81 - 89 of 89 for getOperands (0.13 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/transforms/fold_constants_to_subgraph.cc

            // Locate the argument position of the use.
            int argument_index = -1;
            for (int i = 0; i < consumer_call.getNumOperands(); ++i) {
              if (consumer_call.getOperand(i) == op->getResult(0)) {
                argument_index = i;
                break;
              }
            }
    
            // Copy the const into the consumer func and replace their usages.
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.1K bytes
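
    The scan above finds which operand position of the consumer call is fed by the constant's result. A minimal stand-alone sketch of the same lookup over generic operations; findOperandIndex is a hypothetical helper, not part of the pass:

        #include "mlir/IR/Operation.h"
        #include "mlir/IR/Value.h"

        // Hypothetical helper: return the operand position at which `consumer`
        // uses the first result of `producer`, or -1 if no such use exists.
        int findOperandIndex(mlir::Operation *producer, mlir::Operation *consumer) {
          for (mlir::OpOperand &use : producer->getResult(0).getUses()) {
            if (use.getOwner() == consumer) return use.getOperandNumber();
          }
          return -1;
        }

    Walking the result's uses rather than scanning every operand also covers the case where the consumer uses the value more than once; this version simply returns the first matching position.
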
  2. tensorflow/compiler/mlir/lite/stablehlo/transforms/fuse_convolution_pass.cc

        multiplier = isa<mhlo::ConstantOp>(bcast_or_const_op)
                         ? dyn_cast_or_null<mhlo::ConstantOp>(bcast_or_const_op)
                         : bcast_or_const_op->getOperand(0)
                               .getDefiningOp<mhlo::ConstantOp>();
        if (multiplier == nullptr) {
          return failure();
        }
    
        auto result_type = OpTrait::util::getBroadcastedType(filter.getType(),
    - Last Modified: Thu Feb 22 22:21:19 UTC 2024
    - 8.3K bytes
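
    The snippet accepts a multiplier that is either a constant itself or a broadcast of one, reaching through getOperand(0) to the defining op. A minimal sketch of that pattern, using arith::ConstantOp as a stand-in for mhlo::ConstantOp; getNearbyConstant is a hypothetical name:

        #include "mlir/Dialect/Arith/IR/Arith.h"
        #include "mlir/IR/Value.h"

        // Hypothetical helper: return the constant defining `v`, either directly
        // or through the first operand of v's defining op (e.g. a broadcast).
        mlir::arith::ConstantOp getNearbyConstant(mlir::Value v) {
          if (auto c = v.getDefiningOp<mlir::arith::ConstantOp>()) return c;
          mlir::Operation *def = v.getDefiningOp();
          if (!def || def->getNumOperands() == 0) return nullptr;
          return def->getOperand(0).getDefiningOp<mlir::arith::ConstantOp>();
        }
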
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc

          // This op is guaranteed to be a constant as ODS checks IsConstTensor.
          // Check if the number of elements meets the requirement.
          int current_num_elements =
              mlir::cast<ShapedType>(call_op.getOperand(idx).getType())
                  .getNumElements();
          if (current_num_elements < min_num_elements_for_weights_) {
            call_op.emitRemark("Quantization is skipped for ")
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
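
    A minimal sketch of the element-count check in isolation, assuming the operand carries a statically shaped tensor type; operandIsLargeEnough and min_elements are illustrative names, not part of the pass:

        #include <cstdint>

        #include "mlir/IR/BuiltinTypes.h"
        #include "mlir/IR/Operation.h"

        // Hypothetical check: does operand `idx` of `op` hold at least
        // `min_elements` elements? Non-shaped or dynamic shapes are rejected.
        bool operandIsLargeEnough(mlir::Operation *op, unsigned idx,
                                  int64_t min_elements) {
          auto shaped =
              mlir::dyn_cast<mlir::ShapedType>(op->getOperand(idx).getType());
          if (!shaped || !shaped.hasStaticShape()) return false;
          return shaped.getNumElements() >= min_elements;
        }
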
  4. tensorflow/compiler/mlir/tensorflow/transforms/lower_globals_to_ml_program.cc

      if (v.getDefiningOp()->getNumOperands() == 1) {
        // If the value is originating from an unary op, assume it's something
        // simple like "cast" and keep tracing.
        return traceUpwardsToArgument(v.getDefiningOp()->getOperand(0), seen, out);
      } else {
        // Typically a tf.VarHandle op.
        return v.getDefiningOp()->emitOpError("Non constant predecessor");
      }
    }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.6K bytes
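
    The recursion above keeps tracing through single-operand ops such as casts. The same walk can be written as a loop over generic operations; traceThroughUnaryOps is a hypothetical helper that simply stops at a multi-operand producer instead of emitting an error as the pass does:

        #include "mlir/IR/Operation.h"
        #include "mlir/IR/Value.h"

        // Hypothetical helper: walk backwards through single-operand ops and
        // return the furthest value reached (e.g. a block argument or the
        // result of a multi-operand producer).
        mlir::Value traceThroughUnaryOps(mlir::Value v) {
          while (mlir::Operation *def = v.getDefiningOp()) {
            if (def->getNumOperands() != 1) break;
            v = def->getOperand(0);
          }
          return v;
        }
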
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/op_stat_pass.cc

          // Use rhs operand to detect types for dynamic range quantizable ops.
          Value value_for_deducing_op_type =
              (dyn_cast_or_null<DynamicRangeQuantizedOpInterface>(op))
                  ? op->getOperand(1)
                  : op->getResult(0);
          ShapedType value_shaped_type = mlir::dyn_cast_or_null<ShapedType>(
              value_for_deducing_op_type.getType());
          if (value_shaped_type != nullptr) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.7K bytes
  6. tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc

      }
    
      int num_return_operands = terminator->getNumOperands();
      new_output_types.reserve(num_return_operands);
      for (int i = 0; i != num_return_operands; ++i) {
        auto returned_value = terminator->getOperand(i);
        Type returned_type = returned_value.getType();
        Operation* returned_op = returned_value.getDefiningOp();
        if (returned_op && llvm::isa<DequantizeOp>(returned_op)) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.9K bytes
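
    A compact sketch of the collection step, assuming the terminator's operands are the function's returned values; collectReturnTypes is a hypothetical name:

        #include "llvm/ADT/SmallVector.h"
        #include "mlir/IR/Operation.h"
        #include "mlir/IR/Types.h"

        // Hypothetical helper: gather the type of every value returned by a
        // terminator, in operand order.
        llvm::SmallVector<mlir::Type> collectReturnTypes(mlir::Operation *terminator) {
          llvm::SmallVector<mlir::Type> types;
          types.reserve(terminator->getNumOperands());
          for (mlir::Value v : terminator->getOperands())
            types.push_back(v.getType());
          return types;
        }

    When no per-value inspection is needed, Operation::getOperandTypes() yields the same sequence directly.
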
  7. tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h

    bool GetValueAsConstant(Value val, AttrT &attr) {
      while (auto result = mlir::dyn_cast<OpResult>(val)) {
        Operation *op = result.getOwner();
        if (!isa<IdentityOp>(op) && !isa<IdentityNOp>(op)) break;
        val = op->getOperand(result.getResultNumber());
      }
      return matchPattern(val, m_Constant(&attr));
    }
    
    // Checks if both compilation and replication attributes are present in the
    // operation, and if their values are valid.
    - Last Modified: Wed May 22 19:47:48 UTC 2024
    - 8.7K bytes
  8. tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc

        quant::AccumulatorScaleFunc func) {
      std::vector<quant::QuantizedType> non_bias_types;
      non_bias_types.reserve(non_biases.size());
      for (int non_bias : non_biases) {
        Operation *non_bias_define = op->getOperand(non_bias).getDefiningOp();
        if (auto dequant = llvm::dyn_cast<TFL::DequantizeOp>(non_bias_define)) {
          auto non_bias_type = mlir::cast<TensorType>(dequant.getInput().getType());
          auto non_bias_ele_type =
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.4K bytes
  9. tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_avg_pool.cc

    TorchAvgPoolData GetTorchAvgPoolData(CompositeOp op) {
      auto composite_attrs = op.getCompositeAttributes();
      TorchAvgPoolData data;
    
      auto op_type = mlir::cast<RankedTensorType>(op.getOperand(0).getType());
    
      data.n = op_type.getShape()[0];
      data.c = op_type.getShape()[1];
      data.h_in = op_type.getShape()[2];
      data.w_in = op_type.getShape()[3];
    
      std::vector<int32_t> kernel_size;
    - Last Modified: Tue May 28 23:16:05 UTC 2024
    - 9.2K bytes
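
    A minimal sketch of reading the N/C/H/W dimensions from the first operand's ranked tensor type, as the snippet does; the Nchw struct and getNchw are hypothetical names, and the 4-D NCHW layout is an assumption taken from the surrounding code:

        #include <cstdint>
        #include <optional>

        #include "mlir/IR/BuiltinTypes.h"
        #include "mlir/IR/Operation.h"

        // Hypothetical helper: pull N/C/H/W out of a 4-D ranked tensor operand.
        struct Nchw { int64_t n, c, h, w; };

        std::optional<Nchw> getNchw(mlir::Operation *op) {
          if (op->getNumOperands() == 0) return std::nullopt;
          auto type =
              mlir::dyn_cast<mlir::RankedTensorType>(op->getOperand(0).getType());
          if (!type || type.getRank() != 4) return std::nullopt;
          auto shape = type.getShape();  // ArrayRef<int64_t>
          return Nchw{shape[0], shape[1], shape[2], shape[3]};
        }
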