Results 101 - 110 of 166 for mat_mul (0.17 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

    // NEXT ID: 7
    message UnitWiseQuantizationSpec {
      // Quantization unit granularity.
      // NEXT ID: 4
      message QuantizationUnit {
        // Type of the op, ex: Conv2D, MatMul, Einsum... The node_name field can
        // be omitted if it is intended to match all nodes with this type.
        string op_type = 1;
        // Name of the node. This field accepts re2 regex format. If the node name
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
  2. tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc

    ==============================================================================*/
    
    // This transformation pass decomposes dense operations that assume
    // support for hybrid quantization. These cases cover when a dense operation
    // (e.g. matmul) has both quantized and unquantized inputs by dequantizing
    // the quantized inputs, performing the operation in the expressed type, then
    // requantizing if a quantized output is required.
    //
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.8K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op_stablehlo.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-add-dump-tensor-op='debugger_type=float_per_layer' | FileCheck --check-prefix=FloatPerLayer %s
    
    module {
      func.func @matmul2(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
        %0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 18K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_xla.mlir

      %dq_weight = "quantfork.dcast"(%q_weight) : (tensor<144x12x!quant.uniform<i8:f32, 0.074855112561992565:-1>>) -> tensor<144x12xf32>
      %9 = "tf.MatMul"(%7, %dq_weight) {transpose_a = false, transpose_b = false} : (tensor<*xf32>, tensor<144x12xf32>) -> tensor<*xf32>
      %10 = "quantfork.qcast"(%9) {volatile} : (tensor<*xf32>) -> tensor<*x!quant.uniform<i8:f32, 4.000000e-03:-12>>
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 11.4K bytes
  5. tensorflow/c/experimental/ops/math_ops.h

                 AbstractTensorHandle* const y, AbstractTensorHandle** z,
                 const char* name = nullptr, const char* raw_device_name = nullptr);
    
    // Multiply the matrix "a" by the matrix "b".
    Status MatMul(AbstractContext* ctx, AbstractTensorHandle* const a,
                  AbstractTensorHandle* const b, AbstractTensorHandle** product,
                  bool transpose_a = false, bool transpose_b = false,
    - Last Modified: Tue May 10 19:11:36 UTC 2022
    - 4.4K bytes
  6. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver.mlir

      }
    
      func.func private @composite_matmul_with_bias_fn_1(%arg0: tensor<1x4xf32>, %arg1: tensor<4x3xf32>, %arg2: tensor<3xf32>) -> tensor<1x3xf32> attributes {tf_quant.composite_function} {
        %0 = "tf.MatMul"(%arg0, %arg1) <{grad_a = false, grad_b = false, transpose_a = false, transpose_b = false}> {attr_map = "0:transpose_a,1:transpose_b", device = ""} : (tensor<1x4xf32>, tensor<4x3xf32>) -> tensor<1x3xf32>
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 24.3K bytes
  7. tensorflow/cc/gradients/math_grad.cc

                            std::vector<Output>* grad_outputs) {
      if (is_batch == false) {
        auto dx =
            MatMul(scope, x0, x1, MatMul::TransposeA(adj_x0).TransposeB(adj_x1));
        grad_outputs->push_back(dx);
        auto dy =
            MatMul(scope, y0, y1, MatMul::TransposeA(adj_y0).TransposeB(adj_y1));
        grad_outputs->push_back(dy);
      } else {
        auto dx = BatchMatMulV3(scope, x0, x1, x_data_type,
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 50.7K bytes
  8. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

        } : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
        %6 = "tf.Cast"(%5) : (tensor<*xf32>) -> tensor<*xi32>
        func.return %6 : tensor<*xi32>
      }
    
      // Matmul with int32 accumulation.
      func.func private @internal_matmul_fn(
                             %input : tensor<*xi8>, %weight : tensor<*xi8>,
                             %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>,
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
  9. tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc

      // May have been filtered so check for lack of failure instead of success.
      EXPECT_EQ(compilation_status.Delta(kMlirWithFallbackModeFailure), 0);
    }
    
    TEST(LegalizeTFTest, MatMul) {
      static constexpr char kMatMulModuleStr[] = R"(
      module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
        func.func @main() -> (tensor<5x11xf32>) {
    - Last Modified: Thu Jun 13 23:59:33 UTC 2024
    - 16.1K bytes
  10. tensorflow/compiler/mlir/tfr/resources/decomposition_lib.mlir

    //         "complex64, complex128, string}")
    tfr.func @tf__add_(!tfr.tensor<T>, !tfr.tensor<T>)
        -> !tfr.tensor<T> attributes{T}
    
    // Translated from:
    //
    // REGISTER_OP("MatMul")
    //     .Input("a: T")
    //     .Input("b: T")
    //     .Output("product: T")
    //     .Attr("transpose_a: bool = false")
    //     .Attr("transpose_b: bool = false")
    - Last Modified: Wed Oct 13 16:33:28 UTC 2021
    - 4.2K bytes
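
Result 2 above (decompose_hybrid_quantization.cc) describes the hybrid-quantization decomposition as a dequantize, compute, requantize sequence: the quantized inputs of a dense op such as matmul are dequantized, the op runs in the expressed (float) type, and the result is requantized only if a quantized output is required. The C++ sketch below illustrates just that arithmetic under an assumed affine (scale/zero-point) int8 scheme; the names (QuantParams, HybridMatMul) are hypothetical and are not the pass or any TensorFlow API.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Affine (uniform) quantization: real_value = scale * (q - zero_point).
    struct QuantParams {
      float scale;
      int32_t zero_point;
    };

    // Dequantize an int8 tensor into the expressed (float) type.
    std::vector<float> Dequantize(const std::vector<int8_t>& q, QuantParams p) {
      std::vector<float> r(q.size());
      for (size_t i = 0; i < q.size(); ++i)
        r[i] = p.scale * (static_cast<float>(q[i]) - p.zero_point);
      return r;
    }

    // Requantize a float tensor back to int8 when a quantized output is needed.
    std::vector<int8_t> Quantize(const std::vector<float>& r, QuantParams p) {
      std::vector<int8_t> q(r.size());
      for (size_t i = 0; i < r.size(); ++i) {
        float v = std::round(r[i] / p.scale) + p.zero_point;
        q[i] = static_cast<int8_t>(std::clamp(v, -128.0f, 127.0f));
      }
      return q;
    }

    // Hybrid matmul (float lhs, quantized rhs), decomposed as:
    // dequantize the quantized input, run the float matmul, requantize.
    std::vector<int8_t> HybridMatMul(const std::vector<float>& lhs,     // [m, k]
                                     const std::vector<int8_t>& rhs_q,  // [k, n]
                                     QuantParams rhs_params,
                                     QuantParams out_params,
                                     int m, int k, int n) {
      std::vector<float> rhs = Dequantize(rhs_q, rhs_params);
      std::vector<float> out(m * n, 0.0f);
      for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j)
          for (int p = 0; p < k; ++p)
            out[i * n + j] += lhs[i * k + p] * rhs[p * n + j];
      return Quantize(out, out_params);
    }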
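
Result 5 above (math_ops.h) declares the C++ op wrapper MatMul(a, b, product, transpose_a, transpose_b). As a plain-C++ reference for what those flags mean (product = op(a) * op(b), where op(x) is x or its transpose), here is a naive, hypothetical implementation; it only illustrates the math and does not use TensorFlow's AbstractContext/AbstractTensorHandle API.

    #include <vector>

    // Naive dense matmul over row-major buffers.
    // a is [m, k] (or [k, m] when transpose_a), b is [k, n] (or [n, k] when
    // transpose_b); the product is [m, n].
    std::vector<float> NaiveMatMul(const std::vector<float>& a,
                                   const std::vector<float>& b,
                                   int m, int k, int n,
                                   bool transpose_a = false,
                                   bool transpose_b = false) {
      std::vector<float> product(m * n, 0.0f);
      for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
          float acc = 0.0f;
          for (int p = 0; p < k; ++p) {
            float av = transpose_a ? a[p * m + i] : a[i * k + p];
            float bv = transpose_b ? b[j * k + p] : b[p * n + j];
            acc += av * bv;
          }
          product[i * n + j] = acc;
        }
      }
      return product;
    }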
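
The gradient code in result 7 above (math_grad.cc) builds the matmul backward pass out of further MatMul (or BatchMatMulV3) calls, with the operand order and TransposeA/TransposeB flags selected from the forward op's transpose/adjoint attributes. The standard identities it encodes, for C = A B with upstream gradient G = dL/dC, are:

    \frac{\partial L}{\partial A} = G\,B^{\top}, \qquad
    \frac{\partial L}{\partial B} = A^{\top} G

When the forward inputs were themselves transposed (or adjointed, in the batch case), the same identities are rearranged, which is why the snippet passes adj_x0/adj_x1 and adj_y0/adj_y1 through to the TransposeA/TransposeB options.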
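
Result 8 above (quantized_function_library.mlir) labels its helper "Matmul with int32 accumulation". In a quantized matmul of that kind, the int8 operands typically have their zero points subtracted and the products are summed in an int32 accumulator, so the reduction can run without overflowing for realistic sizes; rescaling that accumulator to the output's quantized type is a separate step using scale and zero-point operands like those visible in the snippet's signature. A minimal, hypothetical C++ sketch of the accumulation itself (not the library function):

    #include <cstdint>
    #include <vector>

    // Quantized matmul core with int32 accumulation over row-major buffers:
    // input is [m, k] int8, weight is [k, n] int8, and the result is the
    // [m, n] int32 accumulator, still scaled by input_scale * weight_scale.
    std::vector<int32_t> Int8MatMulAccumI32(const std::vector<int8_t>& input,
                                            const std::vector<int8_t>& weight,
                                            int32_t input_zp, int32_t weight_zp,
                                            int m, int k, int n) {
      std::vector<int32_t> acc(m * n, 0);
      for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j)
          for (int p = 0; p < k; ++p)
            acc[i * n + j] +=
                (static_cast<int32_t>(input[i * k + p]) - input_zp) *
                (static_cast<int32_t>(weight[p * n + j]) - weight_zp);
      return acc;
    }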