Results 61 - 70 of 114 for mat_mul (0.27 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

    // NEXT ID: 7
    message UnitWiseQuantizationSpec {
      // Quantization unit granularity.
      // NEXT ID: 4
      message QuantizationUnit {
        // Type of the op, ex: Conv2D, MatMul, Einsum... The node_name field can
        // be omitted if it is intended to match all nodes with this type.
        string op_type = 1;
        // Name of the node. This field accepts re2 regex format. If the node name
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
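The proto comment above describes matching by exact op type plus an optional RE2 regex over node names. A minimal Python sketch of those matching semantics (the helper `unit_matches` is my own illustration, and Python's `re` stands in for RE2 here; neither is TensorFlow API):

    import re  # stand-in for RE2; this pattern subset behaves the same

    def unit_matches(unit_op_type: str, unit_node_name: str,
                     op_type: str, node_name: str) -> bool:
        # op_type must match exactly (e.g. Conv2D, MatMul, Einsum).
        if op_type != unit_op_type:
            return False
        # An omitted node_name matches every node of that type.
        if not unit_node_name:
            return True
        # Otherwise node_name is a regex over the node's full name.
        return re.fullmatch(unit_node_name, node_name) is not None

    # e.g. target every MatMul under model/encoder/:
    assert unit_matches("MatMul", r"model/encoder/.*", "MatMul", "model/encoder/mm_0")
    assert not unit_matches("MatMul", r"model/encoder/.*", "Conv2D", "model/encoder/c_0")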
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_xla.mlir

      %dq_weight = "quantfork.dcast"(%q_weight) : (tensor<144x12x!quant.uniform<i8:f32, 0.074855112561992565:-1>>) -> tensor<144x12xf32>
      %9 = "tf.MatMul"(%7, %dq_weight) {transpose_a = false, transpose_b = false} : (tensor<*xf32>, tensor<144x12xf32>) -> tensor<*xf32>
      %10 = "quantfork.qcast"(%9) {volatile} : (tensor<*xf32>) -> tensor<*x!quant.uniform<i8:f32, 4.000000e-03:-12>>
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 11.4K bytes
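For readers unfamiliar with the `quantfork.qcast`/`dcast` pair: `!quant.uniform<i8:f32, scale:zp>` denotes affine int8 quantization. A NumPy sketch of the round trip, reusing the scale 4.0e-03 and zero point -12 from the qcast result type above (the Python functions are mine, not the MLIR ops):

    import numpy as np

    SCALE, ZP = 4.0e-03, -12  # from the !quant.uniform type above

    def qcast(x):
        # Affine-quantize f32 -> i8: q = round(x / scale) + zp, clipped to i8.
        return np.clip(np.round(x / SCALE) + ZP, -128, 127).astype(np.int8)

    def dcast(q):
        # Dequantize i8 -> f32: x ~= (q - zp) * scale.
        return (q.astype(np.float32) - ZP) * SCALE

    x = np.array([0.1, -0.05, 0.02], dtype=np.float32)
    assert np.allclose(dcast(qcast(x)), x, atol=SCALE / 2)  # error <= half a step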
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver.mlir

      }
    
      func.func private @composite_matmul_with_bias_fn_1(%arg0: tensor<1x4xf32>, %arg1: tensor<4x3xf32>, %arg2: tensor<3xf32>) -> tensor<1x3xf32> attributes {tf_quant.composite_function} {
        %0 = "tf.MatMul"(%arg0, %arg1) <{grad_a = false, grad_b = false, transpose_a = false, transpose_b = false}> {attr_map = "0:transpose_a,1:transpose_b", device = ""} : (tensor<1x4xf32>, tensor<4x3xf32>) -> tensor<1x3xf32>
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 24.3K bytes
  4. tensorflow/cc/gradients/math_grad.cc

                            std::vector<Output>* grad_outputs) {
      if (is_batch == false) {
        auto dx =
            MatMul(scope, x0, x1, MatMul::TransposeA(adj_x0).TransposeB(adj_x1));
        grad_outputs->push_back(dx);
        auto dy =
            MatMul(scope, y0, y1, MatMul::TransposeA(adj_y0).TransposeB(adj_y1));
        grad_outputs->push_back(dy);
      } else {
        auto dx = BatchMatMulV3(scope, x0, x1, x_data_type,
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 50.7K bytes
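The branch shown computes the standard dense MatMul gradients: for z = x @ y with upstream gradient dz, the closed forms are dx = dz @ y^T and dy = x^T @ dz, and the adj_* transpose flags in the C++ select among the transposed variants of these two products. A NumPy sketch of the non-batch, non-transposed case (names are illustrative):

    import numpy as np

    def matmul_grad(x, y, dz):
        """For z = x @ y, return (dz/dx, dz/dy) given upstream gradient dz.

        This is the all-flags-false case of the branch above; TransposeA /
        TransposeB there generalize it to the transposed variants."""
        dx = dz @ y.T   # same shape as x
        dy = x.T @ dz   # same shape as y
        return dx, dy

    rng = np.random.default_rng(0)
    x, y = rng.standard_normal((2, 3)), rng.standard_normal((3, 4))
    dx, dy = matmul_grad(x, y, np.ones((2, 4)))
    assert dx.shape == x.shape and dy.shape == y.shape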
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

        } : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
        %6 = "tf.Cast"(%5) : (tensor<*xf32>) -> tensor<*xi32>
        func.return %6 : tensor<*xi32>
      }
    
      // Matmul with int32 accumulation.
      func.func private @internal_matmul_fn(
                             %input : tensor<*xi8>, %weight : tensor<*xi8>,
                             %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>,
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
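"Matmul with int32 accumulation" means both operands stay int8 and the products are summed in an int32 accumulator before rescaling; this is exact because the affine map real = scale * (q - zp) distributes over the dot product. A NumPy sketch under that convention (the argument names mirror the MLIR signature above; the Python itself is my illustration, not the library function):

    import numpy as np

    def internal_matmul(input_i8, weight_i8,
                        input_scale, input_zp, weight_scale, weight_zp):
        # Widen to i32 first so products and sums cannot overflow int8.
        acc = ((input_i8.astype(np.int32) - input_zp)
               @ (weight_i8.astype(np.int32) - weight_zp))
        # real_x @ real_w == (s_x * s_w) * (q_x - zp_x) @ (q_w - zp_w)
        return acc.astype(np.float32) * np.float32(input_scale * weight_scale)

    x = np.array([[10, -3]], dtype=np.int8)
    w = np.array([[2], [7]], dtype=np.int8)
    assert internal_matmul(x, w, 0.5, 0, 0.25, 0)[0, 0] == -0.125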
  6. tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc

      // May have been filtered so check for lack of failure instead of success.
      EXPECT_EQ(compilation_status.Delta(kMlirWithFallbackModeFailure), 0);
    }
    
    TEST(LegalizeTFTest, MatMul) {
      static constexpr char kMatMulModuleStr[] = R"(
      module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
        func.func @main() -> (tensor<5x11xf32>) {
    - Last Modified: Thu Jun 13 23:59:33 UTC 2024
    - 16.1K bytes
  7. tensorflow/compiler/mlir/lite/tests/legalize-tf-while.mlir

      %4 = "tf.Sub"(%3, %cst_2) : (tensor<?xi32>, tensor<i32>) -> tensor<?xi32>
      %5 = "tf.Transpose"(%arg3, %4) : (tensor<*xf32>, tensor<?xi32>) -> tensor<*xf32>
      %6 = "tf.MatMul"(%1, %5) {transpose_a = false, transpose_b = true} : (tensor<?x?xf32>, tensor<*xf32>) -> tensor<?x?xf32>
      %7 = "tf.AddV2"(%arg4, %6) {T = f32, device = ""} : (tensor<*xf32>, tensor<?x?xf32>) -> tensor<*xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5K bytes
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_custom_aggregation_ops.mlir

      func.func private @composite_matmul_with_bias_fn_1(%arg0: tensor<1x4xf32>, %arg1: tensor<4x3xf32>, %arg2: tensor<3xf32>) -> tensor<1x3xf32> attributes {tf_quant.composite_function} {
        %0 = "tf.MatMul"(%arg0, %arg1) <{grad_a = false, grad_b = false, transpose_a = false, transpose_b = false}> {attr_map = "0:transpose_a,1:transpose_b", device = ""} : (tensor<1x4xf32>, tensor<4x3xf32>) -> tensor<1x3xf32>
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 32.1K bytes
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc

            }
          }
    
          if (!is_weight_constant) {
            if (!function_name.contains("matmul") &&
                !function_name.contains("einsum")) {
              return absl::InternalError(
                  "Non-constant weights are not supported at the moment,"
                  " except matmul and einsum.");
            } else if (!quant_options_.enable_two_input_tensors() &&
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 16.4K bytes
  10. tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc

          if (function_name.contains("with_bias")) {
            spec->biases_params[2] = {{0, 1},
                                      quant::GetUniformQuantizedTypeForBias};
          }
        } else if (function_name.contains("matmul")) {
          spec->coeff_op_quant_dim[1] = -1;
          if (function_name.contains("with_bias") ||
              function_name.contains("and_bias")) {
            spec->biases_params[2] = {{0, 1},
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.3K bytes
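Result 10 sets `coeff_op_quant_dim[1] = -1` (per-tensor rather than per-channel quantization of the matmul weight) and derives the bias type from operands {0, 1}. The usual convention, which I assume `GetUniformQuantizedTypeForBias` follows in spirit, is an int32 bias with scale = input_scale * weight_scale and zero point 0, so the bias adds directly into the int32 accumulator:

    def bias_quant_params(input_scale: float, weight_scale: float):
        """Hypothetical helper: derive (scale, zero_point) for an i32 bias
        from the scales of operands 0 and 1, per the usual s_b = s_x * s_w,
        zp_b = 0 convention. Not the actual TensorFlow function."""
        return input_scale * weight_scale, 0

    scale, zp = bias_quant_params(4.0e-3, 7.5e-2)
    assert zp == 0 and abs(scale - 3.0e-4) < 1e-12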