Results 31 - 40 of 178 for conv_2d (0.41 sec)

  1. tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel.pbtxt

        key: "narrow_range"
        value {
          b: true
        }
      }
      attr {
        key: "num_bits"
        value {
          i: 8
        }
      }
    }
    node {
      name: "BoxPredictor_4/ClassPredictor/Conv2D"
      op: "Conv2D"
      input: "input"
      input: "BoxPredictor_4/ClassPredictor/weights_quant/FakeQuantWithMinMaxVarsPerChannel"
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }
      attr {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      // CHECK: %0 = "tfl.conv_2d"(%arg0, %arg1, %cst)
    }
    
    // CHECK-LABEL: fuse4DAddIntoConv2d
    func.func @fuse4DAddIntoConv2d(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<2x3x3x3xf32>) -> tensor<256x32x32x2xf32> {
      %cst = arith.constant dense<[[[[1.0, 2.0]]]]> : tensor<1x1x1x2xf32>
      %cst_0 = arith.constant dense<[1.0, 2.0]> : tensor<2xf32>
      %0 = "tfl.conv_2d"(%arg0, %arg1, %cst_0) {
        dilation_h_factor = 1 : i32,
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant-4bit.mlir

    // CHECK: %[[QUANTIZE:.*]] = "tfl.quantize"(%[[CONSTANT0]]) <{qtype = tensor<16x3x3x3x!quant.uniform<u4:f32, 1.000000e+00>>}>
    // CHECK: %[[DEQUANTIZE:.*]] = "tfl.dequantize"(%[[QUANTIZE]])
    // CHECK: %[[CONV:.*]] = "tfl.conv_2d"(%arg0, %[[DEQUANTIZE]], %[[CONSTANT]])
    // CHECK: return %[[CONV]]
    }
    
    // CHECK-LABEL: perChannelFakeQuantWithConv2D
    func.func @perChannelFakeQuantWithConv2D(tensor<256x32x32x3xf32>) -> (tensor<256x8x7x16xf32>) {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 22K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/quantization.mlir

      %2 = "tfl.pseudo_qconst"() {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>, value = dense<0> : tensor<32xi32>} : () -> tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/experimental/tac/hardwares/cpu_hardware.cc

      TargetHardwareOpRegistration<CpuHardware, Op> Op##_CpuHardware_hardware( \
          Create);
    
    // Operation costs on CPU
    
    // Currently used for these ops:
    // tfl.conv_2d / tfl.depthwise_conv_2d / tfl.fully_connected
    class CpuConvOp : public TargetHardwareOperation {
      double GetOpCost(mlir::Operation* op) const override {
        float cost = 0.0;
        int64_t arithmetic_count;
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 5.9K bytes
    - Viewed (0)
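The CpuConvOp snippet in result 5 prices a convolution by its arithmetic count. Below is a minimal, self-contained C++ sketch of that idea; the constant kCpuArithmeticUnitCost, the helper names, and the example shapes are illustrative assumptions, not the actual cpu_hardware.cc API.

    // Hypothetical sketch: cost of a conv_2d estimated from its
    // multiply-accumulate (MAC) count, scaled by a per-unit CPU cost.
    // Names and the constant value are assumptions for illustration.
    #include <cstdint>
    #include <iostream>

    constexpr double kCpuArithmeticUnitCost = 1.0;  // assumed cost per MAC

    // MAC count of an NHWC conv_2d: each output element consumes
    // kernel_h * kernel_w * in_channels multiply-accumulates.
    int64_t ConvMacCount(int64_t batch, int64_t out_h, int64_t out_w,
                         int64_t out_c, int64_t kernel_h, int64_t kernel_w,
                         int64_t in_c) {
      return batch * out_h * out_w * out_c * kernel_h * kernel_w * in_c;
    }

    // Arithmetic-count-based cost, in the spirit of GetOpCost above.
    double EstimateConvCost(int64_t mac_count) {
      return static_cast<double>(mac_count) * kCpuArithmeticUnitCost;
    }

    int main() {
      // Shapes borrowed from the fuse4DAddIntoConv2d test in result 2:
      // input 256x32x32x3, filter 2x3x3x3, output 256x32x32x2.
      const int64_t macs = ConvMacCount(256, 32, 32, 2, 3, 3, 3);
      std::cout << "MACs: " << macs << ", estimated CPU cost: "
                << EstimateConvCost(macs) << "\n";
      return 0;
    }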
  6. tensorflow/compiler/mlir/lite/experimental/tac/hardwares/gpu_hardware.cc

          return false;
        }
        return true;
      }
    };
    std::unique_ptr<TargetHardwareOperation> CreateConcatOp() {
      return std::make_unique<GpuConcatOp>();
    }
    
    // Currently used for these ops:
    // tfl.conv_2d / tfl.depthwise_conv_2d / tfl.fully_connected
    class GpuConvOp : public TargetHardwareOperation {
      double GetOpCost(mlir::Operation* op) const override {
        int64_t arithmetic_count;
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 7.8K bytes
    - Viewed (0)
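The GpuConvOp in result 6 appears to follow the same arithmetic-count pattern as the CPU version; presumably only the per-unit cost constant differs for the GPU target, so the sketch after result 5 carries over with a different (assumed) constant.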
  7. tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant.mlir

    // CHECK: %[[QUANTIZE:.*]] = "tfl.quantize"(%[[CONSTANT0]]) <{qtype = tensor<16x3x3x3x!quant.uniform<u8:f32, 1.000000e+00>>}>
    // CHECK: %[[DEQUANTIZE:.*]] = "tfl.dequantize"(%[[QUANTIZE]])
    // CHECK: %[[CONV:.*]] = "tfl.conv_2d"(%arg0, %[[DEQUANTIZE]], %[[CONSTANT]])
    // CHECK: return %[[CONV]]
    }
    
    // CHECK-LABEL: perChannelFakeQuantWithConv2D
    func.func @perChannelFakeQuantWithConv2D(tensor<256x32x32x3xf32>) -> (tensor<256x8x7x16xf32>) {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tests/post-quantize.mlir

      %2 = "tfl.pseudo_qconst"() {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>, value = dense<0> : tensor<32xi32>} : () -> tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/tests/quantize.mlir

      %4 = "tfl.dequantize"(%3) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.1>>) -> tensor<32x3x3x3xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_without_identity.pbtxt

        key: "narrow_range"
        value {
          b: true
        }
      }
      attr {
        key: "num_bits"
        value {
          i: 8
        }
      }
    }
    node {
      name: "BoxPredictor_4/ClassPredictor/Conv2D"
      op: "Conv2D"
      input: "input"
      input: "BoxPredictor_4/ClassPredictor/weights_quant/FakeQuantWithMinMaxVarsPerChannel"
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }
      attr {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.8K bytes
    - Viewed (0)