Results 1 - 10 of 33 for max_pool (0.17 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir

      %7 = "tf.Relu"(%6) : (tensor<?x64x112x112xf32>) -> tensor<?x64x112x112xf32>
      %8 = "tf.MaxPool"(%7)
           {
             data_format = "NCHW",
             ksize = [1, 1, 3, 3],
             padding = "SAME",
             strides = [1, 1, 2, 2]
           } : (tensor<?x64x112x112xf32>) -> tensor<?x64x56x56xf32>
    
      // CHECK: %[[MAX_POOL:[0-9]*]] = "tf.MaxPool"
      // CHECK-SAME: data_format = "NHWC"
      // CHECK-SAME: ksize = [1, 3, 3, 1]
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.3K bytes
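
    The expected rewrite permutes each per-dimension pooling attribute from
    NCHW to NHWC order, as the CHECK lines show for ksize. A minimal sketch of
    that permutation (illustrative only; the helper name is not part of the pass):

      NCHW_TO_NHWC = [0, 2, 3, 1]

      def permute_pool_attr(attr):
          # Reorder a per-dimension attribute (ksize or strides) from NCHW to NHWC.
          return [attr[i] for i in NCHW_TO_NHWC]

      print(permute_pool_attr([1, 1, 3, 3]))  # ksize   -> [1, 3, 3, 1]
      print(permute_pool_attr([1, 1, 2, 2]))  # strides -> [1, 2, 2, 1]
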
  2. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_end.mlir

    func.func @fold_into_max_pool(%arg0: tensor<1x64x112x112xf32>) -> tensor<1x56x56x64xf32> {
    
      // MaxPool operand transpose must be folded into the op and MaxPool
      // must use NCHW data format with updated kernel size and strides.
    
      // CHECK: %[[RES_PERM:.*]] = "tf.Const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi32>}>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.5K bytes
  3. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

              out = array_ops.concat([out, ones], 0)
            elif self.same_scale_op == 'gather':
              out = array_ops.gather(out, indices=[0], axis=0)
            elif self.same_scale_op == 'max_pool':
              out = nn_ops.max_pool(out, ksize=3, strides=1, padding='SAME')
            elif self.same_scale_op == 'pad':
              paddings = array_ops.ones(
                  (array_ops.rank(out), 2), dtype=dtypes.int32
              )
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
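
    The 'max_pool' branch above can be exercised on its own through the public
    TensorFlow API; with strides=1 and padding='SAME' the spatial shape is
    preserved. A minimal sketch (the input shape is an assumption):

      import tensorflow as tf

      x = tf.random.uniform([1, 8, 8, 4])  # NHWC input, shape chosen for illustration
      y = tf.nn.max_pool(x, ksize=3, strides=1, padding='SAME')
      print(y.shape)  # (1, 8, 8, 4) -- same spatial size as the input
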
  4. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

      @parameterized.parameters(
          testing.parameter_combinations([{
              'same_scale_op': (
                  'concatenate',
                  'gather',
                  'max_pool',
                  'pad',
                  'reshape',
                  'select',
                  'slice',
                  'transpose',
              ),
          }])
      )
      @test_util.run_in_graph_and_eager_modes
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
  5. tensorflow/compiler/mlir/lite/ir/tfl_ops.cc

    int64_t MaxPool2DOp::GetArithmeticCount(Operation* op) {
      int64_t count;
      if (ArithmeticCountUtilHelper::GetFirstOutputCount(op, &count)) {
        auto max_pool = llvm::dyn_cast<MaxPool2DOp>(op);
        return max_pool.getFilterHeight() * max_pool.getFilterWidth() * count;
      }
    
      return -1;
    }
    
    //===----------------------------------------------------------------------===//
    // L2NormalizationOp
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 169.2K bytes
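
    GetArithmeticCount above multiplies the pooling window area by the number
    of elements in the op's first output. A worked example under an assumed
    3x3 window and a 1x56x56x64 output:

      filter_h, filter_w = 3, 3
      output_shape = [1, 56, 56, 64]   # assumed NHWC output of the pooled op

      count = 1
      for d in output_shape:
          count *= d                   # analogue of GetFirstOutputCount: 200704

      print(filter_h * filter_w * count)  # arithmetic count: 1806336
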
  6. tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir

    // CHECK: %[[MAX_POOL:.*]] = "tfl.max_pool_2d"(%[[ARG0]])
    // CHECK-SAME: {filter_height = 3 : i32, filter_width = 4 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 2 : i32, stride_w = 3 : i32}
    // CHECK-SAME: (tensor<2x9x10x3x!quant.uniform<i8:f32, 3.000000e-01:-5>>) -> tensor<2x4x3x3x!quant.uniform<i8:f32, 3.000000e-01:-5>>
    // CHECK: return %[[MAX_POOL]]
    
    // -----
    
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 106.2K bytes
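
    The quantized element type is carried through unchanged, and the output
    shape in the CHECK lines follows from VALID padding, where each spatial
    dimension is out = floor((in - filter) / stride) + 1. A quick check:

      def valid_out(size, filt, stride):
          return (size - filt) // stride + 1

      print(valid_out(9, 3, 2))    # height: (9 - 3) // 2 + 1 = 4
      print(valid_out(10, 4, 3))   # width:  (10 - 4) // 3 + 1 = 3  -> 2x4x3x3 output
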
  7. tensorflow/compiler/mlir/tfr/examples/mnist/mnist_train.py

        # output shape: [-1, 14, 14, 32]
        max_pool1 = gen_mnist_ops.new_max_pool(conv1, 2, 2, 2, 2, 'SAME')
    
        # output shape: [-1, 14, 14, 64]
        conv2 = gen_mnist_ops.new_conv2d(max_pool1, self.weights['f2'],
                                         self.biases['b2'], 1, 1, 1, 1, 'SAME',
                                         'RELU')
    
        # output shape: [-1, 7, 7, 64]
        max_pool2 = gen_mnist_ops.new_max_pool(conv2, 2, 2, 2, 2, 'SAME')
    - Last Modified: Wed Oct 20 03:05:18 UTC 2021
    - 6.5K bytes
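
    The commented output shapes follow from 2x2 pooling with stride 2 and SAME
    padding, which halves each spatial dimension (rounding up). A sketch that
    reproduces the shapes with the standard op; new_max_pool itself is a
    TFR-generated op, and the 28x28 input size is the usual MNIST assumption:

      import tensorflow as tf

      p1 = tf.nn.max_pool2d(tf.zeros([1, 28, 28, 32]), ksize=2, strides=2, padding='SAME')
      print(p1.shape)  # (1, 14, 14, 32)

      p2 = tf.nn.max_pool2d(tf.zeros([1, 14, 14, 64]), ksize=2, strides=2, padding='SAME')
      print(p2.shape)  # (1, 7, 7, 64)
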
  8. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

        DefaultValuedOptionalAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "\"NDHWC\"">:$data_format
      );
    
      let results = (outs
        Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
      );
    
      TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
    }
    
    def TF_MaxPoolGradOp : TF_Op<"MaxPoolGrad", [Pure]> {
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes
  9. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc

    // "SAME").
    bool IsSpatialPoolingWithoutDilation(
        mhlo::ReduceWindowOp rw, llvm::SmallVectorImpl<int64_t>* window_strides,
        std::string* padding_mode, std::string* data_format) {
      // tf.max_pool or tf.avg_pool need at least 3 dimensions (batch, spatial,
      // channel).
      const uint64_t rank = rw.getWindowDimensions().size();
      if (rank <= 3 || rank > 5) return false;
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 154.9K bytes
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize.mlir

        func.return %a: tensor<*xf32>
      }
    
    // CHECK-LABEL: same_scale_test
    // CHECK: %[[maxpool:.*]] = "tf.MaxPool"
    // CHECK: %[[q1:.*]] = "quantfork.qcast"(%[[maxpool]])
    // CHECK-SAME: quant.uniform<i8:f32, 5.000000e-02:-10>
    // CHECK: %[[dq1:.*]] = "quantfork.dcast"(%[[q1]])
    // CHECK-SAME: quant.uniform<i8:f32, 5.000000e-02:-10>
    - Last Modified: Thu Dec 29 02:42:57 UTC 2022
    - 2.1K bytes