Results 1 - 10 of 15 for SpaceToBatchND (0.52 sec)

  1. tensorflow/compiler/mlir/lite/transforms/dilated_conv.h

    //
    //
    //   SpaceToBatchND -> Expand -> Conv2D -> Squeeze -> BatchToSpaceND -> BiasAdd
    //
    //   SpaceToBatchND -> Expand -> Conv2D -> Squeeze -> Pad -> BatchToSpaceND ->
    //   BiasAdd
    //
    //   SpaceToBatchND -> Expand -> Conv2D -> Squeeze -> BiasAdd -> BatchToSpaceND
    //
    //   SpaceToBatchND -> Conv2D -> Pad -> BatchToSpaceND -> BiasAdd
    //
    //   SpaceToBatchND -> Conv2D -> BatchToSpaceND -> BiasAdd
    //
    //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20K bytes
    - Viewed (0)
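    The patterns listed above are the op chains this header matches so they can be
    rewritten as a single dilated convolution. A minimal sketch of the underlying
    equivalence, assuming TensorFlow is installed; tf.nn.atrous_conv2d is used here
    because it emits exactly this SpaceToBatchND -> Conv2D -> BatchToSpaceND chain,
    and the shapes and rate are illustrative only:

      import numpy as np
      import tensorflow as tf

      x = tf.constant(np.random.rand(1, 8, 8, 3), tf.float32)
      w = tf.constant(np.random.rand(3, 3, 3, 4), tf.float32)

      # A single Conv2D carrying the dilation parameter ...
      dilated = tf.nn.conv2d(x, w, strides=1, padding="SAME", dilations=2)

      # ... versus the SpaceToBatchND -> Conv2D -> BatchToSpaceND chain that
      # tf.nn.atrous_conv2d builds internally.
      atrous = tf.nn.atrous_conv2d(x, w, rate=2, padding="SAME")

      print(np.allclose(dilated.numpy(), atrous.numpy(), atol=1e-5))  # expect True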
  2. tensorflow/compiler/mlir/lite/tests/dilated-conv.mlir

      %cst = arith.constant dense<[2, 2]> : tensor<2xi32>
      %cst_0 = arith.constant dense<4> : tensor<2x2xi32>
      %0 = "tf.SpaceToBatchND"(%arg0, %cst, %cst_0) : (tensor<1x128x128x3xf32>, tensor<2xi32>, tensor<2x2xi32>) -> tensor<4x68x68x3xf32>
      %1 = "tf.SpaceToBatchND"(%arg0, %cst, %cst_0) : (tensor<1x128x128x3xf32>, tensor<2xi32>, tensor<2x2xi32>) -> tensor<4x68x68x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 44.7K bytes
    - Viewed (0)
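    A quick sanity check of the result type in the test above (tensor<4x68x68x3xf32>):
    with block_shape = [2, 2] and every paddings entry equal to 4, the batch dimension
    grows by 2 * 2 and each padded spatial dimension (128 + 4 + 4 = 136) is divided by
    its block size. A minimal sketch, assuming the TensorFlow Python API is available:

      import tensorflow as tf

      x = tf.zeros([1, 128, 128, 3])
      y = tf.raw_ops.SpaceToBatchND(
          input=x,
          block_shape=tf.constant([2, 2], tf.int32),  # dense<[2, 2]> : tensor<2xi32>
          paddings=tf.fill([2, 2], 4))                # dense<4> : tensor<2x2xi32>
      print(y.shape)  # (4, 68, 68, 3): batch 1 * 2 * 2, spatial (128 + 4 + 4) / 2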
  3. tensorflow/cc/framework/fuzzing/op_fuzzing.bzl

    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Nov 07 19:14:57 UTC 2022
    - 4.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/tf-ops.mlir

    //  tf.SpaceToBatchND
    //===--------------------------------------------------------------------===//
    
    // Test valid tf.SpaceToBatchND
    // CHECK-LABEL: func @testSpaceToBatchND
    func.func @testSpaceToBatchND(%input: tensor<3x5x7x10xf32>, %block_shape: tensor<2xi64>, %paddings: tensor<2x2xi64>) -> tensor<?x?x?x10xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 23 14:40:35 UTC 2023
    - 236.4K bytes
    - Viewed (0)
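    The verifier test above leaves block_shape and paddings symbolic, so only the
    trailing dimension survives in the result type tensor<?x?x?x10xf32>. The shape
    rule itself is straightforward; a pure-Python sketch, with illustrative values
    block_shape = [3, 7] and paddings = [[2, 2], [0, 0]] that are not taken from the test:

      def space_to_batch_nd_shape(input_shape, block_shape, paddings):
          """Batch grows by prod(block_shape), each padded spatial dim is divided
          by its block size, and the remaining dims are unchanged."""
          m = len(block_shape)
          batch, spatial, remaining = input_shape[0], input_shape[1:1 + m], input_shape[1 + m:]
          prod = 1
          for b in block_shape:
              prod *= b
          out_spatial = [(s + sum(p)) // b for s, p, b in zip(spatial, paddings, block_shape)]
          return [batch * prod] + out_spatial + list(remaining)

      print(space_to_batch_nd_shape([3, 5, 7, 10], [3, 7], [[2, 2], [0, 0]]))
      # [63, 3, 1, 10] -- the trailing 10 is the only dim the verifier can pin down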
  5. tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir

      // CHECK-DAG: [[RESULT:%.+]] = "tf.Reshape"([[PERMUTED]], [[OUTPUT_SHAPE]])
      // CHECK-DAG: return [[RESULT]]
      %0 = "tf.SpaceToBatchND"(%input, %block_shape, %paddings) : (tensor<3x5x7x10xf32>, tensor<2xi64>, tensor<2x2xi64>) -> tensor<?x?x?x10xf32>
      func.return %0 : tensor<?x?x?x10xf32>
    }
    
    // Verify SpaceToBatchND with input tensor of element type f16. This test case is derived from 'fourdim_space_to_batch_nd'. It checks the output
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 92K bytes
    - Viewed (0)
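    The truncated comment above refers to an f16 variant of the lowering test. Outside
    MLIR, the op itself simply preserves the element type; a tiny check assuming
    TensorFlow is installed, reusing the illustrative block_shape/paddings from the
    sketch under result 4:

      import tensorflow as tf

      x16 = tf.zeros([3, 5, 7, 10], tf.float16)
      y16 = tf.raw_ops.SpaceToBatchND(
          input=x16,
          block_shape=tf.constant([3, 7], tf.int32),
          paddings=tf.constant([[2, 2], [0, 0]], tf.int32))
      print(y16.dtype, y16.shape)  # float16, (63, 3, 1, 10)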
  6. tensorflow/cc/gradients/array_grad.cc

          BatchToSpaceND(scope, grad_inputs[0], op.input(1), op.input(2)));
      grad_outputs->push_back(NoGradient());
      grad_outputs->push_back(NoGradient());
      return scope.status();
    }
    REGISTER_GRADIENT_OP("SpaceToBatchND", SpaceToBatchNDGrad);
    
    Status BatchToSpaceGrad(const Scope& scope, const Operation& op,
                            const std::vector<Output>& grad_inputs,
                            std::vector<Output>* grad_outputs) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 10 23:33:32 UTC 2023
    - 31.7K bytes
    - Viewed (0)
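    The snippet shows the registered gradient for SpaceToBatchND: the upstream gradient
    is routed through BatchToSpaceND (reusing the original block_shape, with paddings
    acting as crops), and the two shape operands get NoGradient. A sketch of the same
    identity in Python, assuming TensorFlow is installed; shapes are illustrative:

      import numpy as np
      import tensorflow as tf

      x = tf.constant(np.random.rand(1, 4, 4, 1), tf.float32)
      block_shape = tf.constant([2, 2], tf.int32)
      paddings = tf.constant([[0, 0], [2, 0]], tf.int32)
      upstream = tf.constant(np.random.rand(4, 2, 3, 1), tf.float32)  # same shape as y

      with tf.GradientTape() as tape:
          tape.watch(x)
          y = tf.raw_ops.SpaceToBatchND(input=x, block_shape=block_shape, paddings=paddings)

      grad = tape.gradient(y, x, output_gradients=upstream)

      # Same routing as SpaceToBatchNDGrad above: BatchToSpaceND of the upstream
      # gradient, with paddings reused as crops.
      expected = tf.raw_ops.BatchToSpaceND(input=upstream, block_shape=block_shape, crops=paddings)
      print(np.allclose(grad.numpy(), expected.numpy()))  # expect True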
  7. tensorflow/cc/gradients/array_grad_test.cc

      auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
      auto block_shape = Const(scope_, {2, 2});
      auto paddings = Const(scope_, {{0, 0}, {2, 0}});
      TensorShape y_shape({8, 1, 3, 1});
      auto y = SpaceToBatchND(scope_, x, block_shape, paddings);
      RunTest(x, x_shape, y, y_shape);
    }
    
    TEST_F(ArrayGradTest, BatchToSpaceGrad) {
      TensorShape x_shape({4, 2, 2, 1});
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 10 23:33:32 UTC 2023
    - 19.3K bytes
    - Viewed (0)
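    Only y_shape {8, 1, 3, 1}, block_shape {2, 2}, and paddings {{0, 0}, {2, 0}} are
    visible in the snippet; the input shape for this test is cut off. Working the shape
    rule backwards, an input of shape [2, 2, 4, 1] would be consistent with it (purely
    an illustration, not a claim about the test's actual x_shape):

      block_shape = [2, 2]
      paddings = [[0, 0], [2, 0]]
      x_shape = [2, 2, 4, 1]  # assumed; any shape satisfying the rule would do

      batch = x_shape[0] * block_shape[0] * block_shape[1]   # 2 * 4 = 8
      h = (x_shape[1] + sum(paddings[0])) // block_shape[0]  # (2 + 0) // 2 = 1
      w = (x_shape[2] + sum(paddings[1])) // block_shape[1]  # (4 + 2) // 2 = 3
      print([batch, h, w, x_shape[3]])  # [8, 1, 3, 1] == y_shape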
  8. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

                                                axis_value);
        return success();
      }
    };
    
    // Lowers SpaceToBatchND by reducing to reshape(transpose(reshape(pad(input)))).
    //
    // Before rewrite:
    //   output = SpaceToBatchND(input, block_shape, paddings)
    // Let:
    //   [batch] + spatial_shape + remaining_shape = input.shape
    //   M = spatial_shape.rank
    // After rewrite:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
    - Viewed (0)
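    The comment describes lowering SpaceToBatchND to reshape(transpose(reshape(pad(input)))).
    A NumPy sketch of that decomposition for the common 4D NHWC case (M = 2 spatial
    dimensions), checked against the TensorFlow op; the helper name and the concrete
    shapes are illustrative, not taken from the source:

      import numpy as np
      import tensorflow as tf

      def space_to_batch_nd_4d(x, block_shape, paddings):
          """reshape(transpose(reshape(pad(x)))) for a 4D NHWC input, M = 2."""
          b, h, w, c = x.shape
          bh, bw = block_shape
          (pt, pb), (pl, pr) = paddings
          padded = np.pad(x, [(0, 0), (pt, pb), (pl, pr), (0, 0)])
          ph, pw = h + pt + pb, w + pl + pr
          # Split each spatial dim into (outer, block) factors.
          reshaped = padded.reshape(b, ph // bh, bh, pw // bw, bw, c)
          # Move the block factors in front of the batch dim.
          permuted = reshaped.transpose(2, 4, 0, 1, 3, 5)
          # Fold the block factors into the batch dim.
          return permuted.reshape(b * bh * bw, ph // bh, pw // bw, c)

      x = np.random.rand(1, 4, 4, 3).astype(np.float32)
      ours = space_to_batch_nd_4d(x, [2, 2], [[1, 1], [0, 0]])
      ref = tf.raw_ops.SpaceToBatchND(
          input=x,
          block_shape=tf.constant([2, 2], tf.int32),
          paddings=tf.constant([[1, 1], [0, 0]], tf.int32)).numpy()
      print(np.array_equal(ours, ref))  # expect True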
  9. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

      }
    
      if (failed(ConvertTf2XlaOps(func, ctx))) {
        signalPassFailure();
        return;
      }
    
      // This pattern will try to identify and optimize for dilated convolution.
      // e.g. Patterns like "SpaceToBatchND -> Conv2D -> BatchToSpaceND" will be
      // replaced with a single Conv op with dilation parameter.
      patterns.add<ConvertTFDilatedConvOp<TF::Conv2DOp>, FusedBatchNormV3Pat,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
    - Viewed (0)
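    The comment describes the prepare-TF pass that collapses a
    "SpaceToBatchND -> Conv2D -> BatchToSpaceND" chain into a single convolution with a
    dilation parameter. A hedged sketch of how that pattern typically reaches the pass,
    assuming TensorFlow with the TFLite converter is installed; the module, shapes, and
    rate are illustrative:

      import tensorflow as tf

      class AtrousConv(tf.Module):
          def __init__(self):
              super().__init__()
              self.w = tf.Variable(tf.random.normal([3, 3, 3, 8]))

          @tf.function(input_signature=[tf.TensorSpec([1, 32, 32, 3], tf.float32)])
          def __call__(self, x):
              # Builds the SpaceToBatchND -> Conv2D -> BatchToSpaceND chain in the TF graph.
              return tf.nn.atrous_conv2d(x, self.w, rate=2, padding="SAME")

      m = AtrousConv()
      converter = tf.lite.TFLiteConverter.from_concrete_functions(
          [m.__call__.get_concrete_function()], m)
      tflite_model = converter.convert()
      # The converted flatbuffer is expected to contain a single CONV_2D with
      # dilation factors of 2 instead of the three-op pattern.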
  10. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

      // CHECK: "tfl.cast"
      // CHECK: "tfl.batch_to_space_nd"
    }
    
    func.func @space_to_batch_nd(%arg0: tensor<1x4x4x3xf32>, %arg1: tensor<2xi32>, %arg2: tensor<2x2xi32>) -> tensor<*xf32> {
      %0 = "tf.SpaceToBatchND"(%arg0, %arg1, %arg2) : (tensor<1x4x4x3xf32>, tensor<2xi32>, tensor<2x2xi32>) -> tensor<*xf32>
      func.return %0 : tensor<*xf32>
      // CHECK-LABEL: space_to_batch_nd
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)
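    The test above legalizes tf.SpaceToBatchND on a 1x4x4x3 input to tfl.space_to_batch_nd,
    leaving the result type unranked (tensor<*xf32>) because block_shape and paddings are
    function arguments. For concrete operands the op's data layout is easy to see; a small
    sketch with a single channel for readability and illustrative block_shape/paddings:

      import numpy as np
      import tensorflow as tf

      x = np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1)
      y = tf.raw_ops.SpaceToBatchND(
          input=x,
          block_shape=tf.constant([2, 2], tf.int32),
          paddings=tf.constant([[0, 0], [0, 0]], tf.int32)).numpy()

      # Each output batch holds one phase of the 2x2 block grid.
      print(y.shape)         # (4, 2, 2, 1)
      print(y[0, :, :, 0])   # [[ 0.  2.] [ 8. 10.]]
      print(y[1, :, :, 0])   # [[ 1.  3.] [ 9. 11.]]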