Results 131 - 140 of 147 for CST (0.02 sec)

  1. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_legacy.mlir

      %cst = "tf.Const"() {device = "", value = dense<[2.167590e-01]> : tensor<1xf32>} : () -> tensor<1xf32>
      %cst_1 = "tf.Const"() {device = "", value = dense<1.000000e-03> : tensor<f32>} : () -> tensor<f32>
      %0 = "tf.Identity"(%cst) {device = ""} : (tensor<1xf32>) -> tensor<1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir

      // NOFOLD: %[[PADDING:.*]] = "tf.Const"(){{.*}}[0, 0], [0, 0], [3, 3], [3, 3]
      // NOFOLD: %[[CST:.*]] = "tf.Const"() <{value = dense<[0, 3, 1, 2]> : tensor<4xi32>}> : () -> tensor<4xi32>
      // NOFOLD: %[[TRANSPOSE:[0-9]*]] = "tf.Transpose"(%arg0, %[[CST]]) : (tensor<?x224x224x3xf32>, tensor<4xi32>) -> tensor<?x3x224x224xf32>
    
      // Pad input with new paddings.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.3K bytes
    - Viewed (0)
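
The NOFOLD lines in result 2 capture a layout change from NHWC to NCHW through the permutation constant [0, 3, 1, 2]. A minimal NumPy sketch of that permutation (the batch size of 1 is chosen here only to make the dynamic ? dimension concrete):

    import numpy as np

    x_nhwc = np.zeros((1, 224, 224, 3), dtype=np.float32)  # stands in for tensor<?x224x224x3xf32>
    perm = [0, 3, 1, 2]                                     # the %[[CST]] permutation from the CHECK line
    x_nchw = np.transpose(x_nhwc, perm)
    assert x_nchw.shape == (1, 3, 224, 224)                 # matches tensor<?x3x224x224xf32>
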
  3. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/tf_to_quant.mlir

      func.return %0 : tensor<8x3xf32>
    
    // CHECK:  %[[fq:.*]] = "tf.FakeQuantWithMinMaxVarsPerChannel"(%arg0, %cst, %cst_0)
    // CHECK:  %[[q:.*]] = "quantfork.qcast"(%[[fq]]) : (tensor<8x3xf32>) -> tensor<8x3x!quant.uniform<i8:f32:1, {1.000000e+00:-128,1.000000e+00:-127,1.000000e+00:-128}>>
    // CHECK:  %[[dq:.*]] = "quantfork.dcast"(%[[q]])
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.5K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/tf_to_quant_4bit.mlir

      func.return %0 : tensor<8x3xf32>
    
    // CHECK:  %[[fq:.*]] = "tf.FakeQuantWithMinMaxVarsPerChannel"(%arg0, %cst, %cst_0)
    // CHECK:  %[[q:.*]] = "quantfork.qcast"(%[[fq]]) : (tensor<8x3xf32>) -> tensor<8x3x!quant.uniform<i4:f32:1, {1.000000e+00:-8,1.000000e+00:-7,1.000000e+00:-8}>>
    // CHECK:  %[[dq:.*]] = "quantfork.dcast"(%[[q]])
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.4K bytes
    - Viewed (0)
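
Results 3 and 4 check the same FakeQuant-to-qcast/dcast rewrite at two bit widths: the i8 type carries per-channel parameters {1.0:-128, 1.0:-127, 1.0:-128} and the i4 type carries {1.0:-8, 1.0:-7, 1.0:-8}. A small Python sketch of the affine-parameter formula shows how such scale/zero-point pairs follow from per-channel min/max ranges; the (min, max) values used below are assumptions for illustration, since the actual %cst and %cst_0 constants are not visible in the excerpts.

    def quant_params(rmin, rmax, qmin, qmax):
        # Affine quantization parameters for a real range [rmin, rmax] stored in [qmin, qmax].
        scale = (rmax - rmin) / (qmax - qmin)
        zero_point = round(qmin - rmin / scale)
        return scale, zero_point

    # 8-bit signed storage (result 3): qmin = -128, qmax = 127.
    assert quant_params(0.0, 255.0, -128, 127) == (1.0, -128)
    assert quant_params(-1.0, 254.0, -128, 127) == (1.0, -127)

    # 4-bit signed storage (result 4): qmin = -8, qmax = 7.
    assert quant_params(0.0, 15.0, -8, 7) == (1.0, -8)
    assert quant_params(-1.0, 14.0, -8, 7) == (1.0, -7)
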
  5. tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h

    // the quantization parameters as a TypeAttr and "quant.dcast" op used to
    // convert the output type to the next op. Here are the transformations:
    //
    // input   min cst       max cst              input
    //  \       |             |                     |
    //   \  (tf.Identity) (tf.Identity)   =>   quant.qcast
    //    \     |             |                     |
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.3K bytes
    - Viewed (0)
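
The header comment in result 5 describes rewriting a FakeQuant op, together with its min/max constants, into a quant.qcast/quant.dcast pair. A rough numeric sketch of what that pair computes, assuming 8-bit signed storage (the function and parameter names here are illustrative, not the utility's API):

    def fake_quant(x, rmin, rmax, qmin=-128, qmax=127):
        scale = (rmax - rmin) / (qmax - qmin)
        zero_point = round(qmin - rmin / scale)
        q = max(qmin, min(qmax, round(x / scale) + zero_point))  # the qcast step: snap to the grid and clamp
        return (q - zero_point) * scale                          # the dcast step: map back to float

    print(fake_quant(0.1234, rmin=0.0, rmax=255.0))  # 0.0, since the grid step is 1.0
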
  6. tensorflow/compiler/mlir/tensorflow/tests/shape_inference_with_shape_specialization.mlir

        // CHECK-DAG: %[[CST_1:.*]] = "tf.Const"() <{value = dense<3> : tensor<i32>}> : () -> tensor<i32>
        // CHECK-NEXT: %[[UQ:.*]] = "tf.UniformQuantize"(%arg0, %cst, %cst_0) <{quantization_axis = -1 : i64, quantization_max_val = 127 : i64, quantization_min_val = -128 : i64}> : (tensor<1xf32>, tensor<f32>, tensor<i32>) -> tensor<1x!tf_type.qint8>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 12:49:45 UTC 2024
    - 2.8K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/tests/group_by_dialect.mlir

      %2 = mhlo.constant dense<[[1.1]]> : tensor<1x1xf32>
      %3 = mhlo.multiply %2, %2 : tensor<1x1xf32>
      %cst = "tf.Const"() {value = dense<0.0> : tensor<f32>} : () -> tensor<f32>
      %0 = "tf.AddV2"(%arg0, %cst) {device = "/device:CPU:0"} : (tensor<f32>, tensor<f32>) -> tensor<f32>
      %1 = "tf.Identity"(%0) {device = ""} : (tensor<f32>) -> tensor<f32>
      "tf.NoOp"() {device = ""} : () -> ()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 28 23:43:21 UTC 2022
    - 5.7K bytes
    - Viewed (0)
  8. src/main/java/jcifs/pac/PacMac.java

            byte[] cst = new byte[] {
                (byte) ( ( usage >> 24 ) & 0xFF ), (byte) ( ( usage >> 16 ) & 0xFF ), (byte) ( ( usage >> 8 ) & 0xFF ), (byte) ( usage & 0xFF ),
                (byte) 0x99
            };
    
            byte[] output = new byte[12];
            byte[] dk = deriveKeyAES(baseKey, cst); // Checksum key
            try {
                Mac m = Mac.getInstance("HmacSHA1");
    Registered: Wed Jun 12 15:45:55 UTC 2024
    - Last Modified: Sun Jul 01 13:12:10 UTC 2018
    - 7K bytes
    - Viewed (0)
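
The PacMac.java excerpt in result 8 builds a 5-byte constant from the 32-bit key usage (big-endian) followed by the literal 0x99, derives a checksum key from the base key with that constant, and feeds the data to HmacSHA1; the 12-byte output buffer suggests the digest is truncated to 96 bits. A Python sketch of the two steps that are visible, leaving out the deriveKeyAES step, which is not shown in the excerpt:

    import hashlib
    import hmac

    def usage_constant(usage: int) -> bytes:
        # Mirrors the byte[] cst literal: four big-endian usage bytes plus 0x99.
        return usage.to_bytes(4, "big") + b"\x99"

    def hmac_sha1_96(derived_key: bytes, data: bytes) -> bytes:
        # derived_key stands in for the result of deriveKeyAES(baseKey, cst).
        return hmac.new(derived_key, data, hashlib.sha1).digest()[:12]

    print(usage_constant(17).hex())  # 0000001199
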
  9. tensorflow/compiler/mlir/lite/tests/get-arithmetic-count.mlir

    }
    
    func.func @testTransposeConv(%arg0: tensor<4xi32>, %arg1: tensor<32x4x4x128xf32>, %arg2: tensor<1x32x42x128xf32>) -> tensor<1x64x84x32xf32> {
      %cst = "tfl.no_value"() {value = unit} : () -> none
      // CHECK: _arithmetic_count = 176160768 : i64
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Dec 14 04:58:17 UTC 2022
    - 7.7K bytes
    - Viewed (0)
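
The checked constant 176160768 in result 9 is consistent with counting one multiply and one add per accumulation of the transposed convolution: 2 × input elements × kernel area × output channels. The exact counting rule used by the pass is not shown in the excerpt, but the shapes in the snippet reproduce the number:

    # Shapes from @testTransposeConv: weights 32x4x4x128, input (arg2) 1x32x42x128, output 1x64x84x32.
    input_elems = 1 * 32 * 42 * 128
    kernel_area = 4 * 4
    output_channels = 32

    assert 2 * input_elems * kernel_area * output_channels == 176160768  # the CHECK'd _arithmetic_count
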
  10. tensorflow/compiler/mlir/lite/tests/optimize_batch_matmul.mlir

    // CHECK-LABEL: FuseTransposeFCRhsToBatchMatmul
    func.func @FuseTransposeFCRhsToBatchMatmul(%arg0: tensor<16x1024xf32>, %arg1: tensor<1024x128xf32>, %arg2: none) -> tensor<16x128xf32> {
      %cst = arith.constant dense<[1, 0]> : tensor<2xi32>
      %0 = "tfl.transpose"(%arg1, %cst) : (tensor<1024x128xf32>, tensor<2xi32>) -> tensor<128x1024xf32>
      // CHECK: "tfl.batch_matmul"(%arg0, %arg1)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 9K bytes
    - Viewed (0)
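
The rewrite named FuseTransposeFCRhsToBatchMatmul in result 10 drops an explicit transpose of the fully-connected RHS by switching to tfl.batch_matmul on the original weights: a TFLite-style fully-connected applies its filter transposed, so feeding it a pre-transposed weight reduces to a plain matmul. A small NumPy sketch of that equivalence (the fully_connected helper below is illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.standard_normal((16, 1024)).astype(np.float32)   # tensor<16x1024xf32>
    w = rng.standard_normal((1024, 128)).astype(np.float32)  # tensor<1024x128xf32>

    def fully_connected(inputs, filt):
        # Filter rows are output units, so the filter is applied transposed.
        return inputs @ filt.T

    before = fully_connected(x, w.T)   # tfl.transpose of %arg1 followed by fully-connected
    after = x @ w                      # tfl.batch_matmul(%arg0, %arg1)
    assert np.allclose(before, after)
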