Results 11 - 20 of 26 for MaxPool (0.19 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir

      %7 = "tf.Relu"(%6) : (tensor<?x64x112x112xf32>) -> tensor<?x64x112x112xf32>
      %8 = "tf.MaxPool"(%7)
           {
             data_format = "NCHW",
             ksize = [1, 1, 3, 3],
             padding = "SAME",
             strides = [1, 1, 2, 2]
           } : (tensor<?x64x112x112xf32>) -> tensor<?x64x56x56xf32>
    
      // CHECK: %[[MAX_POOL:[0-9]*]] = "tf.MaxPool"
      // CHECK-SAME: data_format = "NHWC"
      // CHECK-SAME: ksize = [1, 3, 3, 1]
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.3K bytes
    - Viewed (0)
  2. tensorflow/cc/gradients/nn_grad_test.cc

      TensorShape y_shape({1, 1, 1, 1});
      auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
      // Setup window and strides so that we only do one MaxPool.
      const std::vector<int> ksize{1, 2, 2, 1};
      const std::vector<int> strides{1, 2, 2, 1};
      auto y = MaxPool(scope_, x, ksize, strides, "VALID");
      Tensor x_init_value = Tensor(DT_FLOAT, x_shape);
      SetRandomValuesForMaxPooling<float>(&x_init_value);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 22 20:45:22 UTC 2022
    - 15K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir

    // CHECK: %[[maxpool:.*]] = "tf.MaxPool"(%[[conv_quant]]) <{data_format = "NHWC", ksize = [1, 2, 2, 1], padding = "VALID", strides = [1, 1, 1, 1]}> : (tensor<*xi8>) -> tensor<*xi8>
    // CHECK: %[[dequantize:.*]] = "tf.PartitionedCall"(%[[maxpool]]
    // CHECK-SAME: f = @dequantize_i8
    // CHECK: return %[[dequantize]]
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 25.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tfr/examples/mnist/ops_defs.py

    def _composite_max_pool(input_, stride_w, stride_h, filter_width, filter_height,
                            padding):
      ksize = [1, filter_width, filter_height, 1]
      strides = [1, stride_w, stride_h, 1]
      return tf.raw_ops.MaxPool(
          input=input_, ksize=ksize, strides=strides, padding=padding)
    
    
    @tf.RegisterGradient('NewMaxPool')
    def _max_pool_grad(op: ops.Operation, grad):
      filter_width = op.get_attr('filter_width')
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Aug 31 20:23:51 UTC 2023
    - 6.8K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tfr/tests/decompose.mlir

      %explicit_paddings = tfr.constant [] -> !tfr.attr
      %data_format = tfr.constant "NHWC" -> !tfr.attr
      %MaxPool = tfr.call @tf__max_pool(%input_, %stride, %filter, %padding, %explicit_paddings, %data_format) : (!tfr.tensor, !tfr.attr, !tfr.attr, !tfr.attr, !tfr.attr, !tfr.attr) -> (!tfr.tensor)
      tfr.return %MaxPool : !tfr.tensor
    // CHECK: tf__max_pool
    }
    
    // CHECK-LABEL: @tf__cast_float
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 16.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

    // CHECK:  %1 = "tf.MaxPool"(%arg0)
    // CHECK:  %2 = "tf.MaxPool"(%arg0)
    }
    
    func.func @maxPool2DChannelFirst(%arg0: tensor<1x16x6x6xf32>) -> tensor<1x16x1x1xf32> {
      // OK
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)
  7. tensorflow/cc/gradients/nn_grad.cc

          scope, op.input(0), op.output(0), grad_inputs[0], ksize, strides, padding,
          internal::MaxPoolGrad::DataFormat(data_format));
      grad_outputs->push_back(dx);
      return scope.status();
    }
    REGISTER_GRADIENT_OP("MaxPool", MaxPoolGradHelper);
    
    Status MaxPoolGradV2Helper(const Scope& scope, const Operation& op,
                               const std::vector<Output>& grad_inputs,
                               std::vector<Output>* grad_outputs) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 27 23:34:33 UTC 2022
    - 24.5K bytes
    - Viewed (0)
  8. tensorflow/compiler/jit/flags.cc

              " RED: All reduction operations."
              " MISC: Mixed operations."
              " PWRED: TF operations that get converted to PW+RED operation in XLA."
              " REDUCEWINDOW: TF operations like MaxPool/AvgPool that get "
              "converted to ReduceWindow in XLA."
              " REDUCEWINDOWPW: Operation that get converted to ReduceWindow + PW "
              "(LRN, LRNGrad)."
              " BN: TF FusedBatchNorm* operations."
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 17 18:52:57 UTC 2024
    - 24.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

    // -----
    
    // CHECK-LABEL: maxpool_explicit_padding
    func.func @maxpool_explicit_padding(%arg0: tensor<2x12x20x7xi32>) -> tensor<2x3x5x7xi32> {
      // CHECK: tf.MaxPool
      // TODO(b/165938852): need to support explicit padding in max_pool.
    
      %0 = "tf.MaxPool"(%arg0) {data_format = "NHWC", ksize = [1, 2, 2, 1], padding = "EXPLICIT", strides = [1, 4, 4, 1]} : (tensor<2x12x20x7xi32>) -> tensor<2x3x5x7xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/jit/mark_for_compilation_pass.cc

    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
    - Viewed (0)
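
Result 1 expects the layout optimizer to rewrite an NCHW MaxPool into an NHWC one, permuting the tensor and the ksize/strides attributes with the same [0, 2, 3, 1] mapping. The sketch below is not the MLIR pass itself, only a minimal Python illustration of that rewrite under assumed shapes; the helper name nchw_max_pool_via_nhwc is made up for this example.

    import tensorflow as tf

    def nchw_max_pool_via_nhwc(x_nchw, ksize_nchw, strides_nchw, padding):
      # NCHW -> NHWC permutation, applied to the tensor and to both attributes.
      perm = [0, 2, 3, 1]
      x_nhwc = tf.transpose(x_nchw, perm)
      ksize = [ksize_nchw[i] for i in perm]      # [1, 1, 3, 3] -> [1, 3, 3, 1]
      strides = [strides_nchw[i] for i in perm]  # [1, 1, 2, 2] -> [1, 2, 2, 1]
      y_nhwc = tf.nn.max_pool2d(x_nhwc, ksize, strides, padding)
      return tf.transpose(y_nhwc, [0, 3, 1, 2])  # back to NCHW

    x = tf.random.normal([1, 64, 112, 112])
    y = nchw_max_pool_via_nhwc(x, [1, 1, 3, 3], [1, 1, 2, 2], "SAME")
    print(y.shape)  # (1, 64, 56, 56), matching the result type in the test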
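Results 2 and 7 cover the MaxPool gradient: nn_grad_test.cc sets the window and strides so only one pooling window is taken, and nn_grad.cc registers MaxPoolGradHelper for the op. A quick, hedged way to see what that gradient does is a one-window check with tf.GradientTape (standard TensorFlow API; the input values are arbitrary): the incoming gradient flows only to the element that won the window.

    import tensorflow as tf

    x = tf.reshape(tf.constant([1., 2., 3., 4.]), [1, 2, 2, 1])  # one 2x2 NHWC window
    with tf.GradientTape() as tape:
      tape.watch(x)
      y = tf.nn.max_pool2d(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding="VALID")
    dx = tape.gradient(y, x)
    print(tf.reshape(dx, [2, 2]).numpy())  # gradient is 1. only at the max entry (4.)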
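Result 3 checks that the quantized pipeline keeps "tf.MaxPool" on the int8 tensor between the quantize and dequantize calls. That is sound because max pooling only compares values and affine dequantization with a positive scale is monotonic, so pooling before or after dequantizing agrees. A small NumPy sketch with made-up scale and zero-point values:

    import numpy as np

    scale, zero_point = 0.1, 3                     # assumed quantization parameters
    q = np.array([[5, 7], [9, 4]], dtype=np.int8)  # one quantized 2x2 window
    dequant = (q.astype(np.float32) - zero_point) * scale

    pool_then_dequant = (np.max(q).astype(np.float32) - zero_point) * scale
    dequant_then_pool = np.max(dequant)
    print(np.isclose(pool_then_dequant, dequant_then_pool))  # True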