Results 1 - 10 of 139 for input_type (0.15 sec)

  1. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc

        return ExpressedToQuantizedConverter{input_type, element_type};
      }
      // Supported primitive type (which just is the expressed type).
      if (isQuantizablePrimitiveType(input_type))
        return ExpressedToQuantizedConverter{input_type, input_type};
      // Unsupported.
      return ExpressedToQuantizedConverter{input_type, nullptr};
    }
    
    Type ExpressedToQuantizedConverter::convert(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 4.3K bytes
    - Viewed (0)
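    The snippet above decides, for each expressed (floating-point) type, which quantized type it converts to, and pairs unsupported inputs with a null target. Below is a minimal standalone sketch of that three-way result, with the MLIR type system replaced by hypothetical string tags and an assumed quantizable-type check; it illustrates the pattern, not the TensorFlow implementation.

    #include <iostream>
    #include <optional>
    #include <string>

    // Hypothetical stand-in for ExpressedToQuantizedConverter: the original
    // (expressed) type plus the quantized target, or empty when unsupported.
    struct ExpressedToQuantized {
      std::string expressed;
      std::optional<std::string> quantized;  // nullopt mirrors the nullptr case
    };

    // Quantizable primitives map to themselves; anything else is unsupported.
    ExpressedToQuantized Convert(const std::string& input_type) {
      const bool quantizable = (input_type == "f32" || input_type == "f16");
      if (quantizable) return {input_type, input_type};
      return {input_type, std::nullopt};  // unsupported
    }

    int main() {
      for (const std::string t : {"f32", "i64"}) {
        auto c = Convert(t);
        std::cout << t << " -> " << (c.quantized ? *c.quantized : "<unsupported>")
                  << "\n";
      }
    }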
  2. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc

      auto input = splitv_op.getValue();
      auto input_type = mlir::dyn_cast_or_null<RankedTensorType>(input.getType());
      if (!input_type || !input_type.hasRank()) return failure();
    
      for (auto result : splitv_op.getResults()) {
        auto result_type = mlir::dyn_cast<RankedTensorType>(result.getType());
        if (result_type == nullptr) return failure();
      }
    
      const int64_t rank = input_type.getRank();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 25.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.cc

        if (input_op && input_op == from_graph.getOperation()) {
          auto input_type =
              mlir::dyn_cast_or_null<RankedTensorType>(input.getType());
          if (input_type == nullptr || !input_type.hasStaticShape()) continue;
          // Quantized type does not support getSizeInBits.
          if (IsQUI8Type(input_type) || IsQI8Type(input_type)) {
            total_size_transferred += input_type.getNumElements() * 8;
          } else {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.3K bytes
    - Viewed (0)
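    The cost-model snippet above accumulates the number of bits moved across a device boundary, counting quantized 8-bit tensors at 8 bits per element and skipping anything without a static shape. A minimal sketch of that accounting without the MLIR dependencies (TensorDesc and its fields are hypothetical stand-ins for RankedTensorType queries):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical per-tensor description standing in for an MLIR RankedTensorType.
    struct TensorDesc {
      int64_t num_elements;
      int element_bits;       // storage width of the element type
      bool is_quant8;         // QI8 / QUI8 in the original
      bool has_static_shape;
    };

    // Skip tensors without a static shape; count quantized 8-bit tensors at
    // 8 bits per element, everything else at its element width.
    int64_t TotalBitsTransferred(const std::vector<TensorDesc>& inputs) {
      int64_t total = 0;
      for (const auto& t : inputs) {
        if (!t.has_static_shape) continue;
        total += t.num_elements * (t.is_quant8 ? 8 : t.element_bits);
      }
      return total;
    }

    int main() {
      std::vector<TensorDesc> inputs = {{1024, 32, false, true}, {1024, 8, true, true}};
      std::cout << TotalBitsTransferred(inputs) << " bits\n";  // 1024*32 + 1024*8 = 40960
    }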
  4. tensorflow/compiler/mlir/lite/utils/arithmetic_count_util.h

        int64_t total_count = 0;
        for (auto input : op->getOperands()) {
          auto input_type =
              mlir::dyn_cast_or_null<mlir::RankedTensorType>(input.getType());
          if (!input_type || !input_type.hasStaticShape()) {
            return false;
          }
          total_count += input_type.getNumElements();
        }
        *count = total_count;
        return true;
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 3.1K bytes
    - Viewed (0)
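    The helper above only reports an arithmetic count when every operand has a fully static shape, and fails otherwise. A standalone sketch of the same contract, with an optional shape standing in for MLIR's RankedTensorType (names here are illustrative):

    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <vector>

    // nullopt models an unranked operand; a negative dimension models a dynamic one.
    using Shape = std::optional<std::vector<int64_t>>;

    // The count is only valid when every operand has a fully static shape.
    bool TotalElementCount(const std::vector<Shape>& operands, int64_t* count) {
      int64_t total = 0;
      for (const auto& shape : operands) {
        if (!shape) return false;  // unranked
        int64_t elems = 1;
        for (int64_t d : *shape) {
          if (d < 0) return false;  // dynamic dimension
          elems *= d;
        }
        total += elems;
      }
      *count = total;
      return true;
    }

    int main() {
      std::vector<Shape> operands;
      operands.push_back(std::vector<int64_t>{2, 3});
      operands.push_back(std::vector<int64_t>{4});
      int64_t count = 0;
      if (TotalElementCount(operands, &count)) std::cout << count << "\n";  // 10
    }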
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/unfuse_batch_norm_pass.cc

        auto inputs = bn_op.getOperand();
        auto input_type = mlir::dyn_cast<RankedTensorType>(inputs.getType());
        if (!input_type) {
          return failure();
        }
        auto feature_index = bn_op.getFeatureIndex();
    
        // Compute mean
        int64_t input_last_dim = input_type.getRank() - 1;
        auto dims_type = RankedTensorType::get(/*shape=*/{input_last_dim},
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.2K bytes
    - Viewed (0)
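    The pass above rewrites a fused batch-norm op into the plain arithmetic it represents, broadcasting the statistics along the feature dimension. The per-element formula being materialized is sketched below as a scalar function (the epsilon default is illustrative; the actual pass builds broadcast and rsqrt ops rather than calling std::sqrt):

    #include <cmath>
    #include <iostream>

    // Per-element batch-norm arithmetic that unfusing materializes:
    //   y = (x - mean) / sqrt(variance + epsilon) * scale + offset
    float UnfusedBatchNorm(float x, float mean, float variance, float scale,
                           float offset, float epsilon = 1e-5f) {
      return (x - mean) / std::sqrt(variance + epsilon) * scale + offset;
    }

    int main() {
      std::cout << UnfusedBatchNorm(2.0f, 1.0f, 4.0f, 0.5f, 0.1f) << "\n";
    }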
  6. tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc

     public:
      MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ModifyIONodesPass)
    
      explicit ModifyIONodesPass() {}
      explicit ModifyIONodesPass(mlir::Type input_type, mlir::Type output_type) {
        this->input_type = input_type;
        this->output_type = output_type;
      }
    
      void runOnOperation() override;
    
     private:
      // Assign the io types from the command line flag. This is only required for
      // tests.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.9K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/ir/tfl_ops.cc

      auto input_type = operands[0].getType().dyn_cast<ShapedType>();
      if (!input_type || !input_type.hasRank()) {
        // If input is unranked, then so is output.
        inferredReturnTypes.assign(
            num_value, UnrankedTensorType::get(input_type.getElementType()));
        return success();
      }
    
      if (input_type.hasStaticShape() && input_type.getNumElements() <= 0) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 169.2K bytes
    - Viewed (0)
  8. tensorflow/c/kernels/ops/bitcast.cc

                                TF_ShapeHandle* shape, TF_DataType input_type,
                                TF_DataType output_type, TF_Status* status) {
      size_t input_type_size = TF_DataTypeSize(input_type);
      size_t output_type_size = TF_DataTypeSize(output_type);
    
      if (input_type_size == 0 || output_type_size == 0) {
        std::ostringstream err;
        err << "Cannot bitcast type " << input_type << " to " << output_type
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 07:51:50 UTC 2024
    - 5.1K bytes
    - Viewed (0)
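    The shape function above first verifies that both data types have a fixed, nonzero size (TF_DataTypeSize reports 0 for variable-length types such as string) before doing any shape arithmetic. A minimal sketch of that guard without the TensorFlow C API; the size table is a hypothetical, abbreviated stand-in for TF_DataTypeSize:

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    // Hypothetical stand-in for TF_DataTypeSize: 0 marks types with no fixed width.
    size_t TypeSize(const std::string& dtype) {
      static const std::unordered_map<std::string, size_t> kSizes = {
          {"uint8", 1}, {"int32", 4}, {"float32", 4}, {"float64", 8}, {"string", 0}};
      auto it = kSizes.find(dtype);
      return it == kSizes.end() ? 0 : it->second;
    }

    // Both types must have a nonzero, fixed size before a bitcast can be checked.
    bool CanBitcast(const std::string& in, const std::string& out, std::string* err) {
      const size_t in_size = TypeSize(in);
      const size_t out_size = TypeSize(out);
      if (in_size == 0 || out_size == 0) {
        *err = "Cannot bitcast type " + in + " to " + out;
        return false;
      }
      return true;
    }

    int main() {
      std::string err;
      std::cout << CanBitcast("float32", "int32", &err) << "\n";  // 1
      std::cout << CanBitcast("string", "int32", &err) << "\n";   // 0
    }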
  9. tensorflow/compiler/mlir/lite/experimental/tac/common/utils.cc

      bool uint8_type_observed = false;
      for (auto& input : op->getOpOperands()) {
        auto input_type = input.get().getType();
        if (IsF32ShapedType(input_type)) {
          float_type_observed = true;
        } else if (IsQI8Type(input_type)) {
          int8_type_observed = true;
        } else if (IsQUI8Type(input_type)) {
          uint8_type_observed = true;
        }
      }
    
      // We should not observe both uint8 & int8.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 06 05:37:07 UTC 2024
    - 2.3K bytes
    - Viewed (0)
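    The utility above scans an op's operands, notes which element kinds appear, and enforces the invariant spelled out in its comment: int8 and uint8 quantized operands must not be mixed. A minimal sketch of that scan, with an enum standing in for the IsF32ShapedType / IsQI8Type / IsQUI8Type checks:

    #include <iostream>
    #include <vector>

    enum class ElemKind { kF32, kQI8, kQUI8, kOther };

    // Record which kinds appear and reject the forbidden combination:
    // we should not observe both uint8 and int8.
    bool HasConsistentQuantization(const std::vector<ElemKind>& operand_kinds) {
      bool int8_seen = false;
      bool uint8_seen = false;
      for (ElemKind k : operand_kinds) {
        if (k == ElemKind::kQI8) int8_seen = true;
        else if (k == ElemKind::kQUI8) uint8_seen = true;
      }
      return !(int8_seen && uint8_seen);
    }

    int main() {
      std::cout << HasConsistentQuantization({ElemKind::kQI8, ElemKind::kF32}) << "\n";   // 1
      std::cout << HasConsistentQuantization({ElemKind::kQI8, ElemKind::kQUI8}) << "\n";  // 0
    }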
  10. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc

        SmallVector<Value, 4> dequantized_inputs;
        for (auto& input : op->getOpOperands()) {
          auto input_type = input.get().getType();
          if (IsQI8Type(input_type) || IsQUI8Type(input_type) ||
              IsQI32Type(input_type)) {
            auto dequantized_input_type =
                mlir::quant::QuantizedType::castToExpressedType(input_type);
            builder->setInsertionPoint(op);
            auto dequantize_op = builder->create<TFL::DequantizeOp>(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
    - Viewed (0)
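    The transform above wraps each quantized operand in a TFL::DequantizeOp so the consuming op can run on its expressed (float) type. The arithmetic such a dequantize performs is the standard affine mapping sketched below; the scale and zero-point values are illustrative, not taken from any model:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Affine dequantization: real = scale * (quantized - zero_point).
    std::vector<float> Dequantize(const std::vector<int8_t>& q, float scale,
                                  int32_t zero_point) {
      std::vector<float> out;
      out.reserve(q.size());
      for (int8_t v : q) {
        out.push_back(scale * (static_cast<int32_t>(v) - zero_point));
      }
      return out;
    }

    int main() {
      for (float f : Dequantize({-128, 0, 127}, 0.5f, 0)) std::cout << f << " ";
      std::cout << "\n";  // -64 0 63.5
    }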