Results 111 - 120 of 166 for mat_mul (0.12 sec)

  1. tensorflow/compiler/mlir/lite/tests/legalize-tf-while.mlir

      %4 = "tf.Sub"(%3, %cst_2) : (tensor<?xi32>, tensor<i32>) -> tensor<?xi32>
      %5 = "tf.Transpose"(%arg3, %4) : (tensor<*xf32>, tensor<?xi32>) -> tensor<*xf32>
      %6 = "tf.MatMul"(%1, %5) {transpose_a = false, transpose_b = true} : (tensor<?x?xf32>, tensor<*xf32>) -> tensor<?x?xf32>
      %7 = "tf.AddV2"(%arg4, %6) {T = f32, device = ""} : (tensor<*xf32>, tensor<?x?xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_custom_aggregation_ops.mlir

      func.func private @composite_matmul_with_bias_fn_1(%arg0: tensor<1x4xf32>, %arg1: tensor<4x3xf32>, %arg2: tensor<3xf32>) -> tensor<1x3xf32> attributes {tf_quant.composite_function} {
        %0 = "tf.MatMul"(%arg0, %arg1) <{grad_a = false, grad_b = false, transpose_a = false, transpose_b = false}> {attr_map = "0:transpose_a,1:transpose_b", device = ""} : (tensor<1x4xf32>, tensor<4x3xf32>) -> tensor<1x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 32.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc

            }
          }
    
          if (!is_weight_constant) {
            if (!function_name.contains("matmul") &&
                !function_name.contains("einsum")) {
              return absl::InternalError(
                  "Non-constant weights are not supported at the moment,"
                  " except matmul and einsum.");
            } else if (!quant_options_.enable_two_input_tensors() &&
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 16.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc

          if (function_name.contains("with_bias")) {
            spec->biases_params[2] = {{0, 1},
                                      quant::GetUniformQuantizedTypeForBias};
          }
        } else if (function_name.contains("matmul")) {
          spec->coeff_op_quant_dim[1] = -1;
          if (function_name.contains("with_bias") ||
              function_name.contains("and_bias")) {
            spec->biases_params[2] = {{0, 1},
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.3K bytes
    - Viewed (0)
  5. tensorflow/cc/gradients/math_grad_test.cc

      void TestMatMulGrad(const bool t_x, const bool t_y) {
        TestMatMulGradHelper<T>(
            /*is_x_batch=*/false, /*is_y_batch=*/false, t_x, t_y,
            [&](Output x, Output y) {
              return MatMul(root_, x, y, MatMul::TransposeA(t_x).TransposeB(t_y));
            });
      }
    
      template <typename T>
      void TestBatchMatMulGrad(const bool t_x, const bool t_y) {
        TestMatMulGradHelper<T>(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 36K bytes
    - Viewed (0)
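    The transpose flags exercised above follow the standard matmul gradient identities: for Z = X * Y, dX = dZ * Y^T and dY = X^T * dZ. A minimal reference sketch of those identities in plain Eigen (an illustration under that assumption, not the TensorFlow gradient code this test checks):

      #include <Eigen/Dense>

      // Reference gradients for Z = X * Y with no transposes, the base case
      // that TestMatMulGrad then permutes via t_x / t_y.
      Eigen::MatrixXf MatMulGradX(const Eigen::MatrixXf& dz,
                                  const Eigen::MatrixXf& y) {
        return dz * y.transpose();  // same shape as X
      }

      Eigen::MatrixXf MatMulGradY(const Eigen::MatrixXf& x,
                                  const Eigen::MatrixXf& dz) {
        return x.transpose() * dz;  // same shape as Y
      }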
  6. tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc

          return func_.emitWarning()
                 << "Invalid number of arguments in the embedding "
                    "matmul composite function";
        }
        if (func_.getFunctionType().getNumResults() != 1) {
          return func_.emitWarning() << "Invalid number of results in the "
                                        "embedding matmul composite function";
        }
        return success();
      }
    
     private:
      func::FuncOp func_;
    };
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

    }
    
    func.func @matmul(%arg0: tensor<40x37xf32>, %arg1: tensor<37x40xf32>) -> tensor<40x40xf32> {
      %0 = "tf.MatMul"(%arg0, %arg1) {T = "tfdtype$DT_FLOAT", device = "/device:CPU:0", name = "MatMul", transpose_a = false, transpose_b = false} :
    (tensor<40x37xf32>, tensor<37x40xf32>) -> tensor<40x40xf32>
      func.return %0 : tensor<40x40xf32>
    // CHECK-LABEL: matmul
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/jit/xla_activity.proto

    message XlaAutoClusteringSummary {
      // Represents a single element in a histogram of ops ("op" as in "TensorFlow
      // operation").
      //
      // Next ID: 3
      message OpAndCount {
        // The TensorFlow operation (like MatMul, Add, etc.).
        string op = 1;
    
        // The number of times this occurs.
        int32 count = 2;
      }
    
      // Describes a single XLA cluster.
      //
      // Next ID: 4
      message Cluster {
        string name = 1;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 15 03:11:33 UTC 2022
    - 3.6K bytes
    - Viewed (0)
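    OpAndCount is a (op name, occurrence count) pair, so the histogram it encodes is equivalent to a map from op name to count. A minimal C++ sketch of building that histogram (illustrative only; it does not use the generated proto classes):

      #include <map>
      #include <string>
      #include <vector>

      // Count how often each TensorFlow op name appears in a cluster -- the
      // same information each OpAndCount entry carries.
      std::map<std::string, int> CountOps(const std::vector<std::string>& op_names) {
        std::map<std::string, int> histogram;
        for (const std::string& op : op_names) {
          ++histogram[op];  // e.g. {"Add": 1, "MatMul": 3}
        }
        return histogram;
      }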
  9. tensorflow/c/eager/c_api_test_util.h

                                   const tensorflow::string& device_name = "");
    
    // Return an add op that adds `a` and `b`.
    TFE_Op* AddOp(TFE_Context* ctx, TFE_TensorHandle* a, TFE_TensorHandle* b);
    
    // Return a matmul op multiplying `a` by `b`.
    TFE_Op* MatMulOp(TFE_Context* ctx, TFE_TensorHandle* a, TFE_TensorHandle* b);
    
    // Return an identity op.
    TFE_Op* IdentityOp(TFE_Context* ctx, TFE_TensorHandle* a);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jul 17 23:43:59 UTC 2023
    - 7.7K bytes
    - Viewed (0)
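    MatMulOp only constructs the eager op; running it is a separate TFE_Execute call. A minimal usage sketch (assumed, not taken from the header above), where `a` and `b` are compatible 2-D float handles created with the test-util tensor helpers:

      #include "tensorflow/c/eager/c_api.h"
      #include "tensorflow/c/eager/c_api_test_util.h"

      // Build and run a MatMul of `a` and `b`, returning the result handle.
      TFE_TensorHandle* RunMatMul(TFE_Context* ctx, TFE_TensorHandle* a,
                                  TFE_TensorHandle* b, TF_Status* status) {
        TFE_Op* matmul = MatMulOp(ctx, a, b);
        TFE_TensorHandle* result = nullptr;
        int num_retvals = 1;
        TFE_Execute(matmul, &result, &num_retvals, status);
        TFE_DeleteOp(matmul);
        return result;  // remains nullptr if `status` is not TF_OK
      }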
  10. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/tf_to_corert_pipeline.mlir

          %outputs_16, %control_17 = tf_executor.island wraps "tf.Reshape"(%outputs_14, %outputs_6) {device = ""} : (tensor<16x16x16x?xf32>, tensor<2xi32>) -> tensor<?x16384xf32>
          %outputs_18, %control_19 = tf_executor.island wraps "tf.MatMul"(%outputs_16, %outputs_4) {device = "", transpose_a = false, transpose_b = false} : (tensor<?x16384xf32>, tensor<*xf32>) -> tensor<?x?xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 7.7K bytes
    - Viewed (0)