Results 1 - 10 of 14 for f16 (2.21 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_weight.cc

          // Create new ConstantOp-ConvertOp-Operation sequences. At this moment,
          // old ConstantOp is guaranteed to have one F32->F16 convert op regardless
          // of its number of users.
          rewriter.setInsertionPointAfter(op);
          // create new F16 constant op in that location
          ConstantOp new_const = rewriter.create<ConstantOp>(
              op->getLoc(), new_result_type, new_value_attr);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.9K bytes
    - Viewed (0)
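
    The rewrite quoted in result 1 replaces an F32 constant that feeds an F32->F16 convert with a native F16 constant. Below is a minimal, standalone C++ sketch of just the numeric narrowing step using Eigen::half; the variable names and values are illustrative and not taken from the pass itself.

    #include <Eigen/Core>  // Eigen::half, a 16-bit float type
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // Hypothetical f32 weight data that would back the old ConstantOp.
      std::vector<float> f32_values = {0.1f, -1.5f, 3.14159f, 65504.0f};

      // Narrow every element to half precision, mirroring the step the pass
      // performs before materializing the data for the new F16 constant.
      std::vector<Eigen::half> f16_values;
      f16_values.reserve(f32_values.size());
      for (float v : f32_values) f16_values.push_back(static_cast<Eigen::half>(v));

      for (std::size_t i = 0; i < f16_values.size(); ++i)
        std::printf("f32 %.6f -> f16 %.6f\n", f32_values[i],
                    static_cast<float>(f16_values[i]));
      return 0;
    }
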
  2. tensorflow/compiler/mlir/lite/tests/const-fold.mlir

      %5 = "tfl.mul"(%0, %1) {fused_activation_function = "NONE"} : (tensor<  f16>, tensor<  f16>) -> tensor<  f16>
      %6 = "tfl.mul"(%0, %3) {fused_activation_function = "NONE"} : (tensor<  f16>, tensor<4xf16>) -> tensor<4xf16>
      %7 = "tfl.mul"(%2, %1) {fused_activation_function = "NONE"} : (tensor<4xf16>, tensor<  f16>) -> tensor<4xf16>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 45.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/constants_offset.mlir

      func.return %0 : tensor<4xcomplex<f64>>
    }
    
    func.func @f16() -> tensor<4xf16> {
      // CHECK-LABEL: @f16
      // CHECK: value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : tensor<4xf16>
      %0 = "tfl.pseudo_const"() { value = dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf16> } : () -> tensor<4xf16>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 12.1K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/constants.mlir

      func.return %0 : tensor<4xcomplex<f64>>
    }
    
    func.func @f16() -> tensor<4xf16> {
      // CHECK-LABEL: @f16
      // CHECK: value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : tensor<4xf16>
      %0 = "tfl.pseudo_const"() { value = dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf16> } : () -> tensor<4xf16>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 12.1K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td

    include "mhlo/IR/hlo_ops.td"
    
    def SignedIntTensor : TensorOf<[I1, I8, I16, I32, I64]>;
    def UnsignedIntTensor : TensorOf<[UI8, UI16, UI32, UI64]>;
    
    // IEEE compliant floating point tensors.
    def IEEEFloatTensor : TensorOf<[F16, F32, F64]>;
    
    //===----------------------------------------------------------------------===//
    // BatchNorm op patterns.
    //===----------------------------------------------------------------------===//
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 34.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/stablehlo/tests/unfuse_mhlo_batch_norm.mlir

            tensor<256xf64>) -> tensor<4x256xf64>
      func.return %0 : tensor<4x256xf64>
    }
    
    // CHECK-LABEL: @batchNormInference_f16
    // Validate that epsilon is properly down-converted to f16
    // CHECK-DAG: %[[EPS:.+]] = mhlo.constant dense<1.000000e+00> : tensor<256xf16>
    func.func @batchNormInference_f16(
        %x: tensor<4x256xf16>, %scale: tensor<256xf16>, %offset: tensor<256xf16>,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 10.4K bytes
    - Viewed (0)
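
    For context on result 6: unfused batch-norm inference computes y = (x - mean) * scale / sqrt(var + eps) + offset, with the epsilon constant materialized in the same element type as the activations, which is why the CHECK-DAG above expects a tensor<256xf16> epsilon. The following is a minimal standalone C++ sketch of that arithmetic for a single element; the names and values are illustrative, not taken from the test.

    #include <Eigen/Core>  // Eigen::half for the f16 element type
    #include <cmath>
    #include <cstdio>

    int main() {
      using Eigen::half;
      // Epsilon starts as a scalar attribute (here 1.0, matching the
      // dense<1.000000e+00> CHECK-DAG above) and is down-converted to f16.
      const float epsilon_attr = 1.0f;
      const half eps = static_cast<half>(epsilon_attr);

      // One element of the unfused computation:
      //   y = (x - mean) * scale / sqrt(var + eps) + offset
      const half x(2.0f), mean(1.0f), scale(0.5f), var(3.0f), offset(1.0f);
      const float y =
          (static_cast<float>(x) - static_cast<float>(mean)) *
              static_cast<float>(scale) /
              std::sqrt(static_cast<float>(var) + static_cast<float>(eps)) +
          static_cast<float>(offset);
      std::printf("y = %f\n", y);
      return 0;
    }
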
  7. tensorflow/compiler/mlir/lite/transforms/while_loop_outline.cc

        return true;
      };
      return just_call(while_op.getBody()) && just_call(while_op.getCond());
    }
    
    bool IsCompatibleTypeWithTFLCastOp(Type type) {
      auto elemType = getElementTypeOrSelf(type);
      // F16, F32, F64, BF16 types are allowed.
      if (elemType.isBF16() || elemType.isF16() || elemType.isF32() ||
          elemType.isF64())
        return true;
    
      // I1, I4, I8, I16, I32, I64 types are allowed.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.8K bytes
    - Viewed (0)
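
    The snippet in result 7 cuts off right after the comment about integer types. As a hedged sketch (not the actual TensorFlow code), the remaining branch could be expressed with the standard mlir::Type width predicate like this; the helper name is hypothetical.

    #include <initializer_list>
    #include "mlir/IR/Types.h"  // mlir::Type and its isInteger(width) predicate

    // Hypothetical helper: returns true for the I1/I4/I8/I16/I32/I64 element
    // types mentioned in the last comment of the snippet above.
    static bool IsAllowedIntegerElementType(mlir::Type elem_type) {
      for (unsigned width : {1u, 4u, 8u, 16u, 32u, 64u})
        if (elem_type.isInteger(width)) return true;
      return false;
    }
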
  8. tensorflow/compiler/mlir/lite/utils/const_tensor_utils.cc

      // The bytes of floats are stored little-endian.
      switch (elem_type.getIntOrFloatBitWidth()) {
        case 16: {
          assert(bytes_len % 2 == 0);
          // Supports both BF16 and F16.
          assert(elem_type.isF16() || elem_type.isBF16());
          int elem_count = bytes_len / 2;
    
          if (elem_type.isF16()) {
            std::vector<Eigen::half> values;
            values.reserve(elem_count);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 23:04:40 UTC 2024
    - 16.6K bytes
    - Viewed (0)
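
    Result 8 reads 16-bit float constants out of a little-endian byte buffer, two bytes per element. Here is a standalone sketch of that decoding step, assuming a little-endian host and that Eigen::half is a 2-byte wrapper around the raw binary16 bits (both are assumptions, enforced only by the static_assert, and the byte values are illustrative).

    #include <Eigen/Core>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    int main() {
      // Little-endian bytes for two f16 values: 1.0 (0x3C00) and 2.0 (0x4000).
      const uint8_t bytes[] = {0x00, 0x3C, 0x00, 0x40};
      const std::size_t bytes_len = sizeof(bytes);
      assert(bytes_len % 2 == 0);                    // same invariant as the snippet
      const std::size_t elem_count = bytes_len / 2;  // two bytes per f16 element

      // Copy the raw bytes straight into the half buffer; valid only on a
      // little-endian host with a 2-byte Eigen::half.
      static_assert(sizeof(Eigen::half) == 2, "Eigen::half must be 2 bytes");
      std::vector<Eigen::half> values(elem_count);
      std::memcpy(values.data(), bytes, bytes_len);

      for (const Eigen::half& h : values)
        std::printf("%f\n", static_cast<float>(h));
      return 0;
    }
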
  9. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

              new_result_type, ArrayRef<Eigen::half>(new_values));
    
          // Create new ConstantOp-Dequantize-Operation sequences. At this moment,
          // old ConstantOp is guaranteed to have one F32->F16 cast regardless of
          // its number of users.
          rewriter.setInsertionPointAfter(op);
          auto new_const = rewriter.create<arith::ConstantOp>(
              op->getLoc(), new_result_type, new_value_attr);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/transforms/passes.td

          Option<"unfold_batch_matmul_", "unfold_batchmatmul",
                 "bool", "true",
                 "Unfold BatchMatMul into individual MatMul ops.">,
          Option<"allow_bf16_and_f16_type_legalization_", "allow-bf16-and-f16-type-legalization",
                 "bool", "false",
                 "Allow bf16 type legalization.">,
          Option<"use_fake_quant_num_bits_", "use-fake-quant-num-bits",
                 "bool", "false",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 22.6K bytes
    - Viewed (0)