Results 51 - 55 of 55 for matmul_0 (0.18 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/tpu_sharding_identification.mlir

      return %1#0, %1#1 : tensor<1x2xf32>, tensor<1x2xf32>
    }
    func.func @_func(%arg0: tensor<2x4xf32>, %arg1: tensor<4x2xf32>) -> tensor<2x2xf32> {
      %0 = "tf.MatMul"(%arg0, %arg1) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01"} : (tensor<2x4xf32>, tensor<4x2xf32>) -> tensor<2x2xf32>
      return %0 : tensor<2x2xf32>
    }
    
    // -----
    - Last Modified: Tue Feb 20 19:07:52 UTC 2024
    - 47.5K bytes
  2. tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize.cc

    //   %1 = mhlo.reshape %param : (1xCxZ) -> CxZ
    //   mhlo.dot_general %input, %1 {batch_dims = []}
    // To:
    //   mhlo.dot_general %input, %param {batch_dims = [0]}
    //
    // This usage will mostly come from tf-unroll-batch-matmul, so it's fine to only
    // handle the case where batching dim is the leftmost dim.
    LogicalResult ConvertReshapeDotRhsToBatchedDot(mhlo::DotGeneralOp dot,
                                                   PatternRewriter &rewriter) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 26.9K bytes
  3. tensorflow/compiler/mlir/tensorflow/transforms/passes.h

    // Guarantee that all FuncOp's have a single use.
    std::unique_ptr<OperationPass<ModuleOp>> CreateGuaranteeAllFuncsOneUsePass();
    
    // Optional pass which will unroll BatchMatMul and use only MatMul
    std::unique_ptr<OperationPass<func::FuncOp>> CreateUnrollBatchMatMulPassPass();
    
    // Optional pass which will map TF BatchMatMul to TF Einsum
    std::unique_ptr<OperationPass<func::FuncOp>> CreateBatchMatMulToEinsumPass();
    
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 31.8K bytes
  4. tensorflow/compiler/jit/xla_launch_util.cc

        //
        // 2. Old fashion Tensor with raw device memory pointer. This case occurs
        // when the producer is a non-XLA TF GPU kernel or function (e.g.
        // tf.matmul).
        //
        // 3. AsyncValueTensor, containing a PjRtBuffer. This is the legacy mode
        // and certain device type (e.g. TPU) still uses this path.
        AsyncValueTensor* av_tensor = AsyncValueTensor::FromTensor(tensor);
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 40.4K bytes
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_to_mhlo_int_test.cc

        quantization_axis = -1 : i64, quantization_min_val = -128 : i64,
        quantization_max_val = 127 : i64
      } : (tensor<9x10x!tf_type.qint8>, tensor<f32>, tensor<i32>) -> tensor<9x10xf32>
      %0 = "tf.MatMul"(%input, %filter_new) {
      } : (tensor<8x9xf32>, tensor<9x10xf32>) -> tensor<8x10xf32>
      return %0 : tensor<8x10xf32>
    })mlir";
      constexpr absl::string_view kProgram = R"mlir(
    - Last Modified: Wed Apr 03 01:03:21 UTC 2024
    - 35.8K bytes
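
The comment quoted in result 2 describes folding a reshape that drops a unit leading dimension into a batched dot_general. A minimal NumPy sketch of the numeric equivalence that rewrite relies on (shapes and names are illustrative; the precise applicability checks live in ConvertReshapeDotRhsToBatchedDot itself):

    import numpy as np

    # Illustrative shapes: the input is 1xMxC and the parameter is 1xCxZ.
    M, C, Z = 3, 4, 5
    inp = np.random.rand(1, M, C)
    param = np.random.rand(1, C, Z)

    # Before: reshape the parameter to CxZ, then contract over C with no batch dims.
    unbatched = np.einsum('bmc,cz->bmz', inp, param.reshape(C, Z))

    # After: keep the parameter as 1xCxZ and treat the leading dim as a batch dim.
    batched = np.einsum('bmc,bcz->bmz', inp, param)

    assert np.allclose(unbatched, batched)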
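The two passes declared in result 3 rest on the same equivalence: a BatchMatMul is both a stack of per-batch MatMuls and a single einsum contraction. A small NumPy sketch of that equivalence (illustrative only; the actual passes rewrite TF ops inside MLIR, not Python):

    import numpy as np

    B, M, K, N = 4, 2, 3, 5
    a = np.random.rand(B, M, K)
    b = np.random.rand(B, K, N)

    # BatchMatMul semantics: one matrix product per batch element.
    batched = np.matmul(a, b)                            # shape (B, M, N)

    # What unrolling produces: B independent MatMuls, stacked back together.
    unrolled = np.stack([a[i] @ b[i] for i in range(B)])

    # What the Einsum mapping expresses: a single labelled contraction.
    via_einsum = np.einsum('bmk,bkn->bmn', a, b)

    assert np.allclose(batched, unrolled)
    assert np.allclose(batched, via_einsum)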
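Result 5 exercises a program that dequantizes a qint8 filter and feeds it to an ordinary tf.MatMul. A rough NumPy sketch of that uniform dequantize-then-matmul flow, using the test's 8x9 / 9x10 shapes and made-up per-tensor quantization parameters (the scale and zero point here are assumptions, not values from the test):

    import numpy as np

    rng = np.random.default_rng(0)
    input_f32 = rng.standard_normal((8, 9)).astype(np.float32)
    filter_q = rng.integers(-128, 128, size=(9, 10), dtype=np.int8)

    # Assumed per-tensor quantization parameters (quantization_axis = -1).
    scale, zero_point = np.float32(0.05), np.int32(3)

    # Uniform dequantize: real_value = (quantized_value - zero_point) * scale.
    filter_f32 = (filter_q.astype(np.int32) - zero_point).astype(np.float32) * scale

    # The final op in the test program is a plain float MatMul: (8x9) x (9x10) -> (8x10).
    output = input_f32 @ filter_f32
    print(output.shape)   # (8, 10)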