Results 21 - 30 of 139 for input_type (0.13 sec)

  1. tensorflow/compiler/mlir/tf2xla/internal/utils/test_metadata_config.cc

      for (auto input_type : func_type.getInputs()) {
        tensorflow::TensorShape tensor_shape;
        xla::Shape xla_shape = xla::TypeToShape(input_type);
        TF_RETURN_IF_ERROR(tensorflow::TensorShape::BuildTensorShape(
            xla_shape.dimensions(), &tensor_shape));
        arg_shapes.emplace_back(tensor_shape);
    
        DataType dtype;
        TF_RETURN_IF_ERROR(ConvertToDataType(input_type, &dtype));
    
    - Last Modified: Thu Jun 13 23:59:33 UTC 2024
    - 3.9K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h

        int quant_dim = -1;
        auto input_type = mlir::cast<ShapedType>(input.getType());
        if (PerAxis) {
          if (!input_type.hasRank()) {
            tf_op.emitError("The input should have known rank for per-channel op.");
            return failure();
          }
          // This is a special case that the quant_dim is the last dimensions.
          quant_dim = input_type.getRank() - 1;
        }
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.3K bytes
  3. tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_utils.cc

      output_shape[1] = composite_result_shape[2];
      output_shape[2] = composite_result_shape[3];
      output_shape[3] = composite_result_shape[1];
    
      auto input_type = mlir::cast<ShapedType>(old_op->getOperand(0).getType());
    
      return RankedTensorType::get(output_shape, input_type.getElementType());
    }
    }  // namespace odml
    - Last Modified: Wed May 29 18:33:05 UTC 2024
    - 3.4K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op.cc

          OP_REQUIRES(context, context->input_type(i * 3) == DT_FLOAT,
                      absl::AbortedError("The input `min` must have float type."));
          OP_REQUIRES(context, context->input_type(i * 3 + 1) == DT_FLOAT,
                      absl::AbortedError("The input `max` must have float type."));
          OP_REQUIRES(
              context, context->input_type(i * 3 + 2) == DT_INT64,
    - Last Modified: Mon May 13 01:31:23 UTC 2024
    - 8K bytes
  5. tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc

    LogicalResult CheckFusableLayerNormalizedLstmCellSimple(
        func::FuncOp lstm_func) {
      for (int i = 0; i < 5; ++i) {
        auto input = lstm_func.getArgument(i);
        auto input_type = mlir::dyn_cast_or_null<RankedTensorType>(input.getType());
        if (!input_type) {
          lstm_func.emitWarning(
              "we cannot fuse this lstm func because all the inputs have not "
              "ranked tensor type.");
          return failure();
        }
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
  6. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc

                                << n_init_values << ")";
      }
    
      auto input_ty_0 = inputs_ty[0].cast<ShapedType>();
      if (input_ty_0.hasStaticShape()) {
        for (int i = 1; i < n_inputs; ++i) {
          auto input_ty_i = inputs_ty[i].cast<ShapedType>();
          if (input_ty_i.hasStaticShape() &&
              input_ty_i.getShape() != input_ty_0.getShape()) {
            return op.emitOpError()
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 170.8K bytes
  7. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h

    // Quantizes the input model represented as `model_buffer` and writes the result
    // to the `output_buffer`. Both `model_buffer` and `output_buffer` should be a
    // valid FlatBuffer format for Model supported by TFLite.
    //
    // The `input_type`, `output_type` and `inference_type` can be float32 / qint8 /
    // int8 / int16.
    //
    // Returns a partially quantized model if `fully_quantize` is false. Returns a
    // non-OK status if the quantization fails.
    //
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 2.8K bytes
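
    The header comment above describes the contract of the quantization entry point, but the excerpt cuts off before the declaration itself. The sketch below is therefore a hypothetical stand-in, not the real quantize_model.h API: its parameter list is assembled only from the names the comment mentions (model_buffer, output_buffer, input_type, output_type, inference_type, fully_quantize), and QuantizeModelSketch, the file names, and the argument order are all assumptions for illustration.

      // Hedged sketch only: QuantizeModelSketch is a hypothetical stand-in whose
      // parameters mirror the names used in the quantize_model.h header comment;
      // the real declaration may order or extend them differently.
      #include <fstream>
      #include <iostream>
      #include <iterator>
      #include <string>

      #include "absl/status/status.h"
      #include "tensorflow/lite/schema/schema_generated.h"  // tflite::TensorType

      absl::Status QuantizeModelSketch(const std::string& model_buffer,
                                       tflite::TensorType input_type,
                                       tflite::TensorType output_type,
                                       tflite::TensorType inference_type,
                                       bool fully_quantize,
                                       std::string& output_buffer);

      int main() {
        // Load a float32 TFLite FlatBuffer from disk (hypothetical file name).
        std::ifstream in("model_float32.tflite", std::ios::binary);
        std::string model_buffer((std::istreambuf_iterator<char>(in)),
                                 std::istreambuf_iterator<char>());

        // Request a fully quantized int8 model, one of the types the comment allows.
        std::string output_buffer;
        absl::Status status = QuantizeModelSketch(
            model_buffer, tflite::TensorType_INT8, tflite::TensorType_INT8,
            tflite::TensorType_INT8, /*fully_quantize=*/true, output_buffer);
        if (!status.ok()) {
          std::cerr << "Quantization failed: " << status.message() << "\n";
          return 1;
        }

        // On success, output_buffer holds a valid TFLite FlatBuffer, per the comment.
        std::ofstream out("model_int8.tflite", std::ios::binary);
        out << output_buffer;
        return 0;
      }

    Result 10 below, from the corresponding quantize_model.cc, supports the assumption that input_type and output_type are tflite::TensorType values: it logs them with tflite::EnumNameTensorType and converts them with tflite::ConvertElementType.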
  8. tensorflow/compiler/mlir/lite/utils/lstm_utils.cc

                                          Operation** result) {
      auto input_type = mlir::cast<RankedTensorType>(input.getType());
      SmallVector<int64_t, 4> output_shape;
      int size_of_splits;
      if (input_type.getRank() < axis || axis < 0) return failure();
      for (int i = 0; i < input_type.getRank(); ++i) {
        int64_t dim = input_type.getDimSize(i);
        if (i == axis) {
          if (dim % splits != 0) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 36.2K bytes
  9. tensorflow/compiler/mlir/tfr/ir/tfr_ops.cc

            !result_type.isa<IntegerType>()) {
          return nullptr;
        }
        auto input_itype = input.getType().cast<IntegerType>();
        auto result_itype = result_type.cast<IntegerType>();
        if (input_itype.getWidth() == result_itype.getWidth()) return nullptr;
        if (input_itype.getWidth() > result_itype.getWidth()) {
          return builder.create<arith::TruncIOp>(conversion_loc, result_type,
    - Last Modified: Tue Nov 21 16:55:41 UTC 2023
    - 38.2K bytes
  10. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc

                   << ", input_inference_type: "
                   << tflite::EnumNameTensorType(input_type)
                   << ", output_inference_type: "
                   << tflite::EnumNameTensorType(output_type) << "\n";
      mlir::Builder mlir_builder(&context);
      mlir::Type input_mlir_type =
          tflite::ConvertElementType(input_type, mlir_builder);
      mlir::Type output_mlir_type =
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 6.3K bytes