Results 21 - 30 of 39 for 4x4x8xf32 (0.12 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_drq.mlir

    func.func @lift_float_batch_matmul(%arg0: tensor<4x4x3xf32>) -> (tensor<4x4x3xf32>) {
      %cst = "tf.Const"() {device = "", value = dense<1.0> : tensor<4x3x3xf32>} : () -> tensor<4x3x3xf32>
      %0 = "tf.BatchMatMulV2"(%arg0, %cst) {adj_x = false, adj_y = false, device = ""} : (tensor<4x4x3xf32>, tensor<4x3x3xf32>) -> tensor<4x4x3xf32>
      return %0 : tensor<4x4x3xf32>
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.8K bytes
    - Viewed (0)
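
    A note on the shape rule this test exercises: tf.BatchMatMulV2 contracts the innermost dimensions, [B, M, K] x [B, K, N] -> [B, M, N], so the 4x4x3 activation times the 4x3x3 constant yields 4x4x3. A minimal standalone sketch of the same op outside the lifting test (function and operand names assumed):

    // Hedged sketch: tf.BatchMatMulV2 with the [B, M, K] x [B, K, N] -> [B, M, N] shape rule.
    func.func @batch_matmul_sketch(%lhs: tensor<4x4x3xf32>, %rhs: tensor<4x3x3xf32>) -> tensor<4x4x3xf32> {
      // adj_x/adj_y = false: neither operand is transposed before the contraction.
      %0 = "tf.BatchMatMulV2"(%lhs, %rhs) {adj_x = false, adj_y = false} : (tensor<4x4x3xf32>, tensor<4x3x3xf32>) -> tensor<4x4x3xf32>
      func.return %0 : tensor<4x4x3xf32>
    }
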
  2. tensorflow/compiler/mlir/tensorflow/tests/batchmatmul_to_einsum.mlir

      // CHECK-LABEL: test_batch_matmul_broadcast_to_einsum
      // CHECK: "tf.Einsum"(%arg0, %arg1) <{equation = "...mk,...kn->...mn"}> : (tensor<2x2x4xf32>, tensor<2x4x2xf32>) -> tensor<2x2x2xf32>
      %0 = "tf.BatchMatMul"(%arg0, %arg1) {adj_x = false, adj_y = false} : (tensor<2x2x4xf32>, tensor<2x4x2xf32>) -> tensor<2x2x2xf32>
      func.return %0: tensor<2x2x2xf32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 3K bytes
    - Viewed (0)
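
    The CHECK lines above pin down the batchmatmul_to_einsum rewrite: a tf.BatchMatMul becomes a tf.Einsum whose equation "...mk,...kn->...mn" expresses the same batched contraction. A hedged before/after sketch (function names assumed, shapes taken from the snippet):

    // Before: batched matmul on [..., m, k] x [..., k, n] operands.
    func.func @batch_matmul_sketch(%arg0: tensor<2x2x4xf32>, %arg1: tensor<2x4x2xf32>) -> tensor<2x2x2xf32> {
      %0 = "tf.BatchMatMul"(%arg0, %arg1) {adj_x = false, adj_y = false} : (tensor<2x2x4xf32>, tensor<2x4x2xf32>) -> tensor<2x2x2xf32>
      func.return %0 : tensor<2x2x2xf32>
    }

    // After: the same contraction expressed as an einsum.
    func.func @einsum_sketch(%arg0: tensor<2x2x4xf32>, %arg1: tensor<2x4x2xf32>) -> tensor<2x2x2xf32> {
      %0 = "tf.Einsum"(%arg0, %arg1) {equation = "...mk,...kn->...mn"} : (tensor<2x2x4xf32>, tensor<2x4x2xf32>) -> tensor<2x2x2xf32>
      func.return %0 : tensor<2x2x2xf32>
    }
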
  3. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/unidirectional_sequence_lstm.mlir

    func.func @main(tensor<4x4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4x4xf32>, tensor<4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>) -> tensor<4x4x4xf32> {
    // CHECK: {
    // CHECK-NEXT:   version: 3,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Dec 06 18:55:51 UTC 2023
    - 11.7K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

    // CHECK-NEXT: return %[[dq]] : tensor<2x2xf32>
    }
    
    // CHECK-LABEL: prepareStatistics
    func.func @prepareStatistics(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
      %0 = "quantfork.stats"(%arg0) {
        layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>
      } : (tensor<8x4x3xf32>) -> tensor<8x4x3xf32>
      %1 = "quantfork.stats"(%0) {
        layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>,
        axisStats = dense<[
          [-1.0, 1.0],
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
    - Viewed (0)
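
    The quantfork.stats op shown above carries calibration ranges: layerStats is a per-tensor [min, max] pair, while axisStats (cut off in the snippet) holds one [min, max] pair per slice along a chosen axis. A hedged, self-contained sketch of a fully spelled-out stats op; the per-axis values, the axis attribute, and the shapes are assumptions, not the file's contents:

    // Hedged sketch: per-tensor and per-axis calibration statistics on an 8x4x3 activation.
    func.func @stats_sketch(%arg0: tensor<8x4x3xf32>) -> tensor<8x4x3xf32> {
      %0 = "quantfork.stats"(%arg0) {
        // Per-tensor [min, max].
        layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>,
        // Assumed per-slice ranges along the last dimension (3 slices, one [min, max] each).
        axisStats = dense<[[-1.0, 1.0], [-0.5, 0.5], [-2.0, 2.0]]> : tensor<3x2xf32>,
        // Assumed: the axis the per-slice statistics refer to.
        axis = 2 : i64
      } : (tensor<8x4x3xf32>) -> tensor<8x4x3xf32>
      func.return %0 : tensor<8x4x3xf32>
    }
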
  5. tensorflow/compiler/mlir/tensorflow/tests/tpu_sharding_identification.mlir

      "tf_device.cluster_func"(%1) {func = @cluster_func, use_spmd_for_xla_partitioning = true, num_cores_per_replica = 8 : i64} : (tensor<4x4x4xf32>) -> ()
      func.return
    }
    
    func.func @cluster_func(%arg0: tensor<4x4x4xf32>) {
      func.return
    }
    
    // -----
    
    // Tests partitioned inputs/outputs with no sharding (via XLA SPMD) defaults to
    // replicate sharding ("").
    
    // CHECK-LABEL: func @partitioned_input_output
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Feb 20 19:07:52 UTC 2024
    - 47.5K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/tests/tpu_update_embedding_enqueue_op_inputs.mlir

      %2 = "tf.Const"() {value = dense<0.0> : tensor<2x2xf32>} : () -> tensor<2x2xf32>
      %3 = "tf.Const"() {value = dense<0.0> : tensor<4x4xf32>} : () -> tensor<4x4xf32>
      "tf.SendTPUEmbeddingGradients"(%2, %3) {_tpu_embedding_layer = "call1", config = "\0A\0B\0C\0D", operandSegmentSizes = array<i32: 2, 0>} : (tensor<2x2xf32>, tensor<4x4xf32>) -> ()
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 5.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

      %3 = "tf.AddV2"(%arg0, %0): (tensor<4x4xf32>, tensor<1xf32>) -> tensor<4x4xf32>
      %4 = "tf.Log"(%3) {device = "/job:localhost/replica:0/task:0/device:GPU:0"}: (tensor<4x4xf32>) -> tensor<4x4xf32>
    
      // CHECK: %[[ADD1:.*]] = "tf.AddV2"
      // CHECK: %[[LOG1:.*]] = "tf.Log"(%[[ADD1]])
      %5 = "tf.AddV2"(%4, %1): (tensor<4x4xf32>, tensor<1xf32>) -> tensor<4x4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
    - Viewed (0)
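
    One detail worth noting in the snippet: tf.AddV2 adds a tensor<1xf32> to a tensor<4x4xf32>, relying on NumPy-style broadcasting, so the single element is applied at every position and the 4x4 shape is preserved. A minimal hedged sketch of that broadcast in isolation (constant value and function name assumed):

    // Hedged sketch: tf.AddV2 broadcasting a 1-element tensor over a 4x4 operand.
    func.func @broadcast_add_sketch(%arg0: tensor<4x4xf32>) -> tensor<4x4xf32> {
      %cst = "tf.Const"() {value = dense<1.0> : tensor<1xf32>} : () -> tensor<1xf32>
      %0 = "tf.AddV2"(%arg0, %cst) : (tensor<4x4xf32>, tensor<1xf32>) -> tensor<4x4xf32>
      func.return %0 : tensor<4x4xf32>
    }
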
  8. tensorflow/compiler/mlir/lite/tests/quantize-variables.mlir

      %43 = "quantfork.stats"(%42) {layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>} : (tensor<1x4x3xf32>) -> tensor<1x4x3xf32>
      %44 = "tfl.strided_slice"(%43, %1, %2, %3) {begin_mask = 7 : i32, ellipsis_mask = 0 : i32, end_mask = 5 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 0 : i32, offset = false} : (tensor<1x4x3xf32>, tensor<3xi32>, tensor<3xi32>, tensor<3xi32>) -> tensor<1x2x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.3K bytes
    - Viewed (0)
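
    The masks on the tfl.strided_slice above are bit fields over the three slice dimensions: begin_mask = 7 (0b111) ignores all three begin indices, and end_mask = 5 (0b101) ignores the end indices for dimensions 0 and 2, so only dimension 1 is clipped (1x4x3 -> 1x2x3). A hedged standalone sketch with the masks decoded in comments (constant operand values assumed):

    // Hedged sketch: decoding the bit-field masks on a tfl.strided_slice.
    func.func @strided_slice_sketch(%arg0: tensor<1x4x3xf32>) -> tensor<1x2x3xf32> {
      %begin = "tfl.pseudo_const"() {value = dense<0> : tensor<3xi32>} : () -> tensor<3xi32>
      %end = "tfl.pseudo_const"() {value = dense<[1, 2, 3]> : tensor<3xi32>} : () -> tensor<3xi32>
      %strides = "tfl.pseudo_const"() {value = dense<1> : tensor<3xi32>} : () -> tensor<3xi32>
      // begin_mask = 7 (0b111): the begin index is ignored for every dimension.
      // end_mask = 5 (0b101): the end index is ignored for dims 0 and 2; only dim 1's end (2) applies.
      %0 = "tfl.strided_slice"(%arg0, %begin, %end, %strides) {begin_mask = 7 : i32, ellipsis_mask = 0 : i32, end_mask = 5 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 0 : i32, offset = false} : (tensor<1x4x3xf32>, tensor<3xi32>, tensor<3xi32>, tensor<3xi32>) -> tensor<1x2x3xf32>
      func.return %0 : tensor<1x2x3xf32>
    }
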
  9. tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir

    // CHECK: %[[CONV:.+]] = stablehlo.convolution(%[[TRANSPOSE_0]], %[[CONST]]) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = {{\[\[}}1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x4x4x8xf32>, tensor<3x3x8x8xf32>) -> tensor<1x4x4x8xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 12.6K bytes
    - Viewed (0)
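
    The pipeline under test rewrites NCHW convolutions into NHWC form; the CHECK line shows the resulting stablehlo.convolution with dimension numbers [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], i.e. NHWC activations and HWIO filters. Moving the feature dimension last is a plain transpose, sketched below with an assumed NCHW input shape:

    // Hedged sketch: permuting an assumed 1x8x4x4 NCHW activation to the 1x4x4x8 NHWC layout
    // consumed by the convolution in the CHECK line.
    func.func @nchw_to_nhwc_sketch(%arg0: tensor<1x8x4x4xf32>) -> tensor<1x4x4x8xf32> {
      %0 = stablehlo.transpose %arg0, dims = [0, 2, 3, 1] : (tensor<1x8x4x4xf32>) -> tensor<1x4x4x8xf32>
      func.return %0 : tensor<1x4x4x8xf32>
    }
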
  10. tensorflow/compiler/mlir/lite/tests/dilated-conv.mlir

      %3 = "tf.Squeeze"(%2) {squeeze_dims = [-3]} : (tensor<2x1x64x8xf32>) -> tensor<2x64x8xf32>
      %4 = "tf.BatchToSpaceND"(%3, %cst_1, %cst) : (tensor<2x64x8xf32>, tensor<1xi32>, tensor<1x2xi32>) -> tensor<1x128x8xf32>
      func.return %4 : tensor<1x128x8xf32>
    
      // CHECK-LABEL: testDilatedConv1DExpandH
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 44.7K bytes
    - Viewed (0)
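
    dilated-conv.mlir checks that a SpaceToBatchND -> Conv -> BatchToSpaceND sandwich (with the ExpandDims/Squeeze pair for the 1-D case shown above) collapses into a single convolution carrying a dilations attribute. A hedged sketch of the collapsed 2-D target form, with assumed shapes and an assumed dilation rate of 2:

    // Hedged sketch: one tf.Conv2D with dilations in place of the space-to-batch sandwich.
    func.func @dilated_conv_sketch(%input: tensor<1x128x128x8xf32>, %filter: tensor<3x3x8x8xf32>) -> tensor<1x128x128x8xf32> {
      // dilations = [1, 2, 2, 1]: rate 2 along H and W; SAME padding keeps the spatial dims.
      %0 = "tf.Conv2D"(%input, %filter) {data_format = "NHWC", dilations = [1, 2, 2, 1], padding = "SAME", strides = [1, 1, 1, 1]} : (tensor<1x128x128x8xf32>, tensor<3x3x8x8xf32>) -> tensor<1x128x128x8xf32>
      func.return %0 : tensor<1x128x128x8xf32>
    }
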