Results 1 - 10 of 495 for opt_size (0.36 sec)

  1. tensorflow/compiler/mlir/lite/transforms/lift_tflite_flex_ops.cc

        const uint8_t* opt_data =
            reinterpret_cast<const uint8_t*>(custom_options.data());
        const size_t opt_size = custom_options.size();
        if (!flexbuffers::VerifyBuffer(opt_data, opt_size)) {
          return emitError(loc, "invalid custom options");
        }
    
        const flexbuffers::Vector& v =
            flexbuffers::GetRoot(opt_data, opt_size).AsVector();
    
        op_name = v[0].AsString().str();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.9K bytes
    - Viewed (0)
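
    The snippet above verifies the flex op's custom options buffer before decoding it
    with flexbuffers. For reference, here is a minimal, self-contained sketch of the
    same verify-and-read round trip; the op name "FlexAddV2" and the main() wrapper
    are illustrative and are not taken from lift_tflite_flex_ops.cc:

    // Build a flexbuffer whose first element is a custom-op name, then verify
    // and read it back, mirroring the pattern in the snippet above.
    #include <cstdint>
    #include <iostream>
    #include <vector>

    #include "flatbuffers/flexbuffers.h"

    int main() {
      flexbuffers::Builder fbb;
      fbb.Vector([&]() { fbb.String("FlexAddV2"); });  // illustrative op name
      fbb.Finish();
      const std::vector<uint8_t>& custom_options = fbb.GetBuffer();

      const uint8_t* opt_data = custom_options.data();
      const size_t opt_size = custom_options.size();
      // Reject malformed buffers before dereferencing anything inside them.
      if (!flexbuffers::VerifyBuffer(opt_data, opt_size)) {
        std::cerr << "invalid custom options\n";
        return 1;
      }
      const flexbuffers::Vector v =
          flexbuffers::GetRoot(opt_data, opt_size).AsVector();
      std::cout << v[0].AsString().str() << "\n";  // prints "FlexAddV2"
      return 0;
    }
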
  2. tensorflow/compiler/mlir/tensorflow/tests/optimize.mlir

    // RUN: tf-opt -tf-optimize %s -o %t && FileCheck %s < %t
    
    // CHECK-LABEL: convbiasaddmul
    func.func @convbiasaddmul(%arg: tensor<256x32x32x3xf32>) -> tensor<256x8x7x16xf32> {
      %filter = arith.constant dense<2.0> : tensor<3x3x3x16xf32>
      %bias = arith.constant dense<3.0> : tensor<16xf32>
      %value = arith.constant dense<4.0> : tensor<16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 3.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/optimize.cc

     public:
      OptimizeIntGraph() = default;
      OptimizeIntGraph(const OptimizeIntGraph &) = default;
      void runOnOperation() override;
    };
    
    #include "tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/optimize.inc"
    
    void OptimizeIntGraph::runOnOperation() {
      RewritePatternSet patterns(&getContext());
      populateWithGenerated(patterns);
      auto func = getOperation();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Feb 24 02:26:47 UTC 2024
    - 2.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/optimize.cc

        // the textual format (on the commandline for example).
        return "quant-optimize";
      }
      StringRef getDescription() const final {
        // This is a brief description of the pass.
        return "Applies optimization after quantization";
      }
    
      void runOnOperation() override;
    };
    
    #include "tensorflow/compiler/mlir/quantization/tensorflow/passes/optimize.inc"
    
    void OptimizePass::runOnOperation() {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 2.6K bytes
    - Viewed (0)
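
    Results 3 and 4 both cut off just before the collected patterns are applied. As a
    point of reference, here is a minimal sketch of the standard MLIR pattern-driver
    boilerplate such passes share; the class name ExampleOptimizePass and the empty
    pattern set are illustrative, not the actual contents of either optimize.cc:

    // Hypothetical pass skeleton: collect rewrite patterns and apply them
    // greedily to the current function, failing the pass on error.
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/PatternMatch.h"
    #include "mlir/Pass/Pass.h"
    #include "mlir/Transforms/GreedyPatternRewriteDriver.h"

    namespace {
    class ExampleOptimizePass
        : public mlir::PassWrapper<ExampleOptimizePass,
                                   mlir::OperationPass<mlir::func::FuncOp>> {
     public:
      MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ExampleOptimizePass)

      void runOnOperation() override {
        mlir::RewritePatternSet patterns(&getContext());
        // Real passes call populateWithGenerated(patterns) here to pull in the
        // TableGen'd rewrites from their generated optimize.inc file.
        if (mlir::failed(mlir::applyPatternsAndFoldGreedily(
                getOperation(), std::move(patterns)))) {
          signalPassFailure();
        }
      }
    };
    }  // namespace
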
  5. tensorflow/compiler/mlir/tfrt/tests/optimize.mlir

    // RUN: tf-tfrt-opt -optimize-tf-for-tfrt -split-input-file -verify-diagnostics %s | FileCheck %s
    
    // CHECK-LABEL: @fold_device_index
    func.func @fold_device_index() -> tensor<i32> {
      // CHECK-NOT: tf.DeviceIndex
      // CHECK: tf.Const
      // CHECK-SAME: value = dense<1> : tensor<i32>
      %0 = "tf.DeviceIndex"() {device = "/device:CPU:0", device_names = ["GPU", "CPU"]} : () -> tensor<i32>
      func.return %0 : tensor<i32>
    }
    
    // -----
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jul 01 23:50:06 UTC 2023
    - 2.4K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/optimize.mlir

    // RUN: tf-quant-opt %s -quant-optimize -allow-unregistered-dialect | FileCheck %s
    
    func.func @remove_redundant_cast(%arg0: tensor<1x100x100x1xf32>) -> (tensor<1x96x96x1xf32>) {
      %cst = "tf.Const"() {value = dense<-128> : tensor<i32>} : () -> tensor<i32>
      %cst_0 = "tf.Const"() {value = dense<0.0235294122> : tensor<f32>} : () -> tensor<f32>
      %cst_1 = "tf.Const"() {value = dense<0.00708661414> : tensor<1xf32>} : () -> tensor<1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 8.1K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/transforms/optimize.cc

    #include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
    
    namespace mlir {
    namespace TFL {
    
    //===----------------------------------------------------------------------===//
    // The actual Optimize Pass.
    namespace {
    #define GEN_PASS_DEF_OPTIMIZEPASS
    #include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc"
    
    constexpr char kRelu[] = "RELU";
    constexpr char kRelu6[] = "RELU6";
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/stablehlo/tests/optimize.mlir

    // RUN: odml-to-stablehlo-opt %s -split-input-file -mhlo-optimize | FileCheck %s
    
    // CHECK-LABEL: testDotToDotGeneralVectorVector
    func.func @testDotToDotGeneralVectorVector(%arg0: tensor<3072xf32>, %arg1: tensor<3072xf32>) -> tensor<f32> {
      %0 = "mhlo.dot"(%arg0, %arg1) : (tensor<3072xf32>, tensor<3072xf32>) -> tensor<f32>
      func.return %0 : tensor<f32>
    
    // CHECK:      %[[RES:.*]] = "mhlo.dot_general"(%arg0, %arg1) <{
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 22.7K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/tests/optimize-after-quantization.mlir

    // RUN: tf-opt %s -tfl-prepare-quantize -canonicalize -tfl-quantize -canonicalize -tfl-optimize -canonicalize | FileCheck %s
    
    // CHECK-LABEL: fuseMulIntoPerTensorConv2dWithQDQs
    func.func @fuseMulIntoPerTensorConv2dWithQDQs(%arg0: tensor<256x32x32x3xf32>) -> tensor<256x8x7x3xf32> {
      %cst = arith.constant dense<1.5> : tensor<3xf32>
      %cst_0 = arith.constant dense<[1.0, 2.0, 3.0]> : tensor<3xf32>
      %w = arith.constant dense<2.0> : tensor<3x3x3x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 1.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/optimize.mlir

    // RUN: stablehlo-quant-opt -optimize-int-graph -split-input-file %s -verify-diagnostics | FileCheck %s
    
    // CHECK-LABEL: func @convolution_add_add
    func.func @convolution_add_add(
        %lhs: tensor<?x3x2x1xi8>, %rhs: tensor<2x1x1x1xi8>,
        %zp_offset: tensor<?x2x2x1xi32>, %bias: tensor<1xi32>
      ) -> tensor<?x2x2x1xi32> {
      // CHECK-DAG: %[[conv:.*]] = mhlo.convolution
      // CHECK-DAG: %[[combined:.*]] = chlo.broadcast_add %[[zp_offset:.*]], %[[bias:.*]]
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Feb 24 02:26:47 UTC 2024
    - 10.7K bytes
    - Viewed (0)