Results 1 - 10 of 26 for 8x16x4xf32 (0.16 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

        %19 = stablehlo.convert %18 : (tensor<8x16x4xi32>) -> tensor<8x16x4xf32>
        %20 = stablehlo.broadcast_in_dim %9, dims = [0, 1, 2] : (tensor<1x1x1xf32>) -> tensor<8x16x4xf32>
        %21 = stablehlo.multiply %19, %20 : tensor<8x16x4xf32>  // * s1 s2
        %22 = call @uniform_quantize_1(%21, %7, %8) : (tensor<8x16x4xf32>, tensor<1x1x1xf32>, tensor<1x1x1xi8>) -> tensor<8x16x4xi8>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
    - Viewed (0)
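    Note: the `// * s1 s2` comment above marks the rescaling step of a manual dequantize-requantize chain: the i32 accumulator is converted to f32, multiplied by the product of the two quantization scales, and re-quantized to i8 by the @uniform_quantize_1 call. The pass this test exercises composes such chains into values of a uniform quantized type; a rough sketch of what a composed result type looks like (the scale and zero point below are made-up placeholders, not taken from the file):

      // Hypothetical composed type: i8 storage, f32 expressed type, placeholder scale and zero point.
      tensor<8x16x4x!quant.uniform<i8:f32, 0.0125:0>>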
  2. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

      func.return %0 : tensor<8x16xf32>
    
    // CHECK-LABEL:minimum
    // CHECK:  "tfl.minimum"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<8x16xf32>) -> tensor<8x16xf32>
    }
    
    func.func @realDiv(%arg0: tensor<8x16xf32>, %arg1: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.RealDiv"(%arg0, %arg1) : (tensor<8x16xf32>, tensor<8x16xf32>) -> tensor<8x16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)
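    Note: the realDiv snippet above shows only the TF-dialect input of the legalization test; the matching CHECK lines are truncated. A minimal sketch of the expected TFLite output, assuming tf.RealDiv lowers to tfl.div with no fused activation (the attribute spelling is an assumption, not copied from the file):

      // Sketch of the legalized body; tfl.div carries a fused_activation_function attribute.
      %0 = "tfl.div"(%arg0, %arg1) {fused_activation_function = "NONE"} : (tensor<8x16xf32>, tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %0 : tensor<8x16xf32>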
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions_with_quantization_specs.mlir

    // DISABLE-ALL-DOT-GENERAL: @main
    func.func @main(%arg0: tensor<1x1x167xf32>) -> tensor<1x1x64xf32> {
      %0 = stablehlo.constant dense<2.000000e+00> : tensor<167x64xf32>
      %1 = stablehlo.dot_general %arg0, %0, contracting_dims = [2] x [0], precision = [DEFAULT, DEFAULT] : (tensor<1x1x167xf32>, tensor<167x64xf32>) -> tensor<1x1x64xf32>
      return %1 : tensor<1x1x64xf32>
    }
    
    // DISABLE-ALL-DOT-GENERAL: %[[CONST:.+]] = stablehlo.constant dense<2.000000e+00>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 02 18:09:38 UTC 2024
    - 8.1K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/prepare-composite-functions-tf.mlir

      %44 = "tf.AddV2"(%43, %37) {device = ""} : (tensor<2x16x1xf32>, tensor<2x16x1xf32>) -> tensor<2x16x1xf32>
      %45 = "tf.Mul"(%42, %35) {device = ""} : (tensor<2x16x1xf32>, tensor<2x16x1xf32>) -> tensor<2x16x1xf32>
      %46 = "tf.AddV2"(%45, %34) {device = ""} : (tensor<2x16x1xf32>, tensor<2x16x1xf32>) -> tensor<2x16x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 122.1K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions.mlir

      %5 = stablehlo.broadcast_in_dim %1, dims = [2] : (tensor<64xf32>) -> tensor<1x1x64xf32>
      %6 = stablehlo.add %4, %5 : tensor<1x1x64xf32>
      %7 = stablehlo.clamp %2, %6, %3 : tensor<1x1x64xf32>
      func.return %7: tensor<1x1x64xf32>
    }
    // CHECK: %[[CONST_0:.*]] = stablehlo.constant dense<2.000000e+00>
    // CHECK: %[[CONST_1:.*]] = stablehlo.constant dense<2.000000e+00>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 49.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc

          return %0 : tensor<1x1x4xf32>
        }
      )mlir";
    
      OwningOpRef<ModuleOp> module_op =
          ParseModuleOpString(kModuleXlaCallModuleOpWithDefaultQuantizationMethod);
      ASSERT_TRUE(module_op);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 04 07:19:09 UTC 2024
    - 14.8K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_legacy.mlir

    }
    
    // CHECK-LABEL: softmax
    func.func @softmax(%arg0: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.Softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %0 : tensor<8x16xf32>
    // CHECK: %[[SOFTMAX_0:.*]] = "tf.Softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
    // CHECK: return %[[SOFTMAX_0]] : tensor<8x16xf32>
    }
    
    // CHECK-LABEL: conv2d_backprop_input_with_add
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

      // CHECK: return %0
    }
    
    // CHECK-LABEL: testAddOfNegRight
    func.func @testAddOfNegRight(%arg0: tensor<8x16xf32>, %arg1: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.Neg"(%arg1) : (tensor<8x16xf32>) -> tensor<8x16xf32>
      %1 = "tf.Add"(%arg0, %0) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<8x16xf32>, tensor<8x16xf32>) -> tensor<8x16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
    - Viewed (0)
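    Note: testAddOfNegRight above exercises the canonicalization of an addition whose right operand is a negation; its CHECK lines are truncated. As a sketch, the pattern is expected to fold tf.Add(x, tf.Neg(y)) into tf.Sub(x, y) while keeping the device attribute (an assumption based on the test name, not on the truncated CHECK lines):

      // Sketch of the canonicalized body: the tf.Neg is absorbed into a subtraction.
      %0 = "tf.Sub"(%arg0, %arg1) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<8x16xf32>, tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %0 : tensor<8x16xf32>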
  9. tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc

          return %0 : tensor<1x1x4xf32>
        }
      )mlir";
    
      const OwningOpRef<ModuleOp> module_op =
          ParseModuleOpString(kXlaCallModuleOpWithQuantizationMethodAttr);
      ASSERT_TRUE(module_op);
    
      func::FuncOp main_fn = FindMainFuncOp(*module_op);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.2K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/canonicalize.mlir

    func.func @reshape_vector_shape(tensor<4x4x4xf32>) -> tensor<16x4xf32> {
    ^bb0(%arg0: tensor<4x4x4xf32>) :
      %shape0 = arith.constant dense<[[16, 4]]> : tensor<1x2xi32>
      // expected-error @+1 {{'tfl.reshape' op requires 'shape' to be rank 1, but got 2}}
      %1 = "tfl.reshape"(%arg0, %shape0) : (tensor<4x4x4xf32>, tensor<1x2xi32>) -> tensor<16x4xf32>
      func.return %1 : tensor<16x4xf32>
    }
    
    // -----
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.6K bytes
    - Viewed (0)
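    Note: the expected-error case above fails because the shape operand is rank 2 (tensor<1x2xi32>), while tfl.reshape requires a rank-1 shape. A minimal valid counterpart would look like the following (hypothetical function name, not part of the file):

    func.func @reshape_rank1_shape(%arg0: tensor<4x4x4xf32>) -> tensor<16x4xf32> {
      // A rank-1 shape operand such as tensor<2xi32> satisfies the tfl.reshape verifier.
      %shape = arith.constant dense<[16, 4]> : tensor<2xi32>
      %0 = "tfl.reshape"(%arg0, %shape) : (tensor<4x4x4xf32>, tensor<2xi32>) -> tensor<16x4xf32>
      func.return %0 : tensor<16x4xf32>
    }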