Results 1 - 8 of 8 for shape_shape (0.19 sec)

  1. tensorflow/compiler/jit/mark_for_compilation_pass_test.cc

      EXPECT_TRUE(clusters.empty());
    }
    
    TEST(XlaCompilationTest, RandomShape) {
      Scope root = Scope::NewRootScope().ExitOnError();
      Output shape_shape = ops::Const(root.WithOpName("shape_shape"), {2}, {1});
      Output shape =
          ops::RandomUniformInt(root.WithOpName("shape"), shape_shape,
                                ops::Const(root.WithOpName("minval"), 1),
                                ops::Const(root.WithOpName("maxval"), 20));
    - Last Modified: Wed Feb 14 10:11:10 UTC 2024
    - 79.6K bytes
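
    The snippet builds a graph in which the shape fed to one random op is
    itself produced by RandomUniformInt, so the output's static shape is
    unknown at graph-construction time. A minimal, hypothetical Python
    sketch of the same dependency using the public tf.random.uniform API
    (random_shape_graph is an illustrative name, not part of the test):

      import tensorflow as tf

      @tf.function
      def random_shape_graph():
          # Two random integers in [1, 20) become the shape of the next
          # random op, so its static shape is unknown while tracing.
          shape = tf.random.uniform([2], minval=1, maxval=20, dtype=tf.int32)
          return tf.random.uniform(shape)
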
  2. tensorflow/compiler/mlir/tensorflow/transforms/optimize.cc

        // TODO(rahulsp) : Handle this case with more careful checks.
        if (reshape_shape.size() < non_unit_dims.size()) return failure();
    
        SmallVector<int64_t, 4> old_reshape_non_unit_dims;
        SmallVector<int64_t, 4> new_reshape_dims;
        int new_reshape_dim_idx = 0;
        for (int64_t dim : reshape_shape) {
          int new_reshape_dim = 1;
          if (dim != 1) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.1K bytes
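
    The guard above bails out when the reshape has fewer dimensions than
    there are non-unit dimensions to account for, and the loop that follows
    collects the reshape's non-unit dimensions. A rough Python transcription
    under that reading; the shapes and the non_unit_dims helper are
    hypothetical stand-ins for values computed earlier in the pass:

      def non_unit_dims(shape):
          # Non-1 entries of a shape, as the loop in the snippet collects them.
          return [d for d in shape if d != 1]

      reshape_shape = [8, 1, 16]
      if len(reshape_shape) < len(non_unit_dims([1, 8, 16])):
          raise ValueError("pattern does not apply")  # mirrors `return failure()`
      print(non_unit_dims(reshape_shape))             # [8, 16]
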
  3. tensorflow/cc/gradients/nn_grad_test.cc

      int channel_dim = (channel_first) ? 1 : shape.dims() - 1;
      TensorShape scale_shape({shape.dim_size(channel_dim)});
      auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
      auto scale = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(scale_shape));
      auto offset = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(scale_shape));
      auto mean = ops::ZerosLike(scope_, scale);
      auto var = ops::OnesLike(scope_, scale);
    
    - Last Modified: Tue Mar 22 20:45:22 UTC 2022
    - 15K bytes
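
    The test sizes the batch-norm scale, offset, mean, and variance to the
    channel dimension of the input shape. A minimal Python sketch of that
    shape logic, assuming standard TF ops; per_channel_params and the
    tf.ones/tf.zeros stand-ins for the DT_FLOAT placeholders are illustrative:

      import tensorflow as tf

      def per_channel_params(shape, channel_first):
          # Scale/offset/mean/variance are vectors whose length equals the
          # channel dimension: dim 1 for NCHW, the last dim for NHWC.
          channel_dim = 1 if channel_first else len(shape) - 1
          scale_shape = [shape[channel_dim]]
          scale = tf.ones(scale_shape)
          offset = tf.zeros(scale_shape)
          mean = tf.zeros_like(scale)   # mirrors ops::ZerosLike
          var = tf.ones_like(scale)     # mirrors ops::OnesLike
          return scale, offset, mean, var

      scale, _, _, _ = per_channel_params([2, 3, 4, 5], channel_first=False)
      print(scale.shape)  # (5,)
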
  4. tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc

          rewriter.getBoolAttr(false));
    
      bool out_reshape_need = (reshape_shape.size() != matmul_shape.size() ||
                               original_type.getRank() != matmul_shape.size());
      // Always add reshape for concrete output shapes.
      if (succeeded(VerifyShapeOfReshapeOp(reshape_shape))) {
        out = createReshapeOp(out, reshape_shape, original_type.getElementType(),
                              op.getLoc(), &rewriter);
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 33.3K bytes
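
    The condition above decides whether a reshape must be appended after the
    matmul that implements the einsum. A direct, hypothetical Python
    transcription (needs_output_reshape is an illustrative name):

      def needs_output_reshape(reshape_shape, matmul_shape, original_rank):
          # A reshape is needed when the matmul result's rank differs from the
          # requested reshape shape's rank or from the original output rank.
          return (len(reshape_shape) != len(matmul_shape)
                  or original_rank != len(matmul_shape))

      print(needs_output_reshape([2, 3, 4], [6, 4], original_rank=3))  # True
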
  5. tensorflow/compiler/mlir/tfr/ir/tfr_ops.cc

                                 input_vector_type.getElementType();
        bool same_shape =
            output_tensor_type.getShape() == input_vector_type.getShape();
        if (!same_element_type || !same_shape) {
          op.emitError("input and output should have same shape and element type.");
        }
        return success(same_element_type && same_shape);
      }
    
      op.emitError("input can not be converted to an output tensor.");
    - Last Modified: Tue Nov 21 16:55:41 UTC 2023
    - 38.2K bytes
  6. tensorflow/compiler/mlir/lite/transforms/optimize.cc

        // dimension.
        if (output_shape.getShape().empty() || reshape_shape.getShape().empty() ||
            output_shape.getShape().back() != reshape_shape.getShape().back() ||
            input_shape.getShape().drop_back() !=
                reshape_shape.getShape().drop_back())
          return failure();
    
        llvm::SmallVector<Type, 1> output_type{reshape_op.getType()};
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
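
    The guard above only lets the rewrite proceed when the op output and the
    reshape agree on the trailing dimension and the reshape's leading
    dimensions match the op input. A hedged Python transcription with plain
    lists in place of the MLIR shaped types (reshape_shapes_compatible is an
    illustrative name):

      def reshape_shapes_compatible(input_shape, output_shape, reshape_shape):
          # The op output and the reshape must be non-scalar, their trailing
          # dimensions must agree, and the reshape's leading dimensions must
          # equal the op input's.
          if not output_shape or not reshape_shape:
              return False
          return (output_shape[-1] == reshape_shape[-1]
                  and input_shape[:-1] == reshape_shape[:-1])

      print(reshape_shapes_compatible([2, 3, 8], [6, 8], [2, 3, 8]))  # True
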
  7. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

            if self.has_reshape():
              input_shape = input_tensor.shape
              if len(input_shape) == 3:
                reshape_shape = (input_shape[0], -1, self.bias_size)
              else:
                reshape_shape = (-1, self.bias_size)
    
              out = array_ops.reshape(out, reshape_shape)
    
            if self.has_bias():
              if self.use_biasadd:
                out = nn_ops.bias_add(out, self.bias)
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
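
    The snippet collapses everything except the trailing bias dimension,
    keeping the leading batch dimension when the input is rank 3. A
    self-contained sketch of that reshape; reshape_for_bias and the sample
    shapes are illustrative:

      import tensorflow as tf

      def reshape_for_bias(out, bias_size):
          # Keep the batch dimension for rank-3 inputs, flatten the rest into
          # (-1, bias_size) so a bias of length bias_size can be added.
          if len(out.shape) == 3:
              reshape_shape = (out.shape[0], -1, bias_size)
          else:
              reshape_shape = (-1, bias_size)
          return tf.reshape(out, reshape_shape)

      print(reshape_for_bias(tf.zeros([2, 4, 6]), bias_size=3).shape)  # (2, 8, 3)
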
  8. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc

        if (type.getRank() > 1) {
          std::vector<int64_t> reshape_shape(type.getRank(), 1);
          reshape_shape[iota_op.getIotaDimension()] = type.getShape()[dimension];
          auto reshape_type = RankedTensorType::get(reshape_shape, element_type);
          Value reshape_shape_op = rewriter.create<TF::ConstOp>(
              iota_op.getLoc(), rewriter.getI64TensorAttr(reshape_shape));
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 154.9K bytes
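
    The snippet reshapes a 1-D iota to the full rank with 1s everywhere
    except the iota dimension, so it broadcasts along the intended axis. A
    NumPy sketch under that reading (broadcastable_iota is an illustrative
    name):

      import numpy as np

      def broadcastable_iota(rank, iota_dimension, size):
          # All-ones shape with `size` in the iota dimension; the 1-D iota
          # then broadcasts along that axis of a rank-`rank` tensor.
          reshape_shape = [1] * rank
          reshape_shape[iota_dimension] = size
          return np.arange(size).reshape(reshape_shape)

      print(broadcastable_iota(rank=3, iota_dimension=1, size=4).shape)  # (1, 4, 1)
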