Results 1 - 10 of 24 for dim2 (0.07 sec)

  1. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc

      for (const auto &item : llvm::enumerate(dims)) {
        int64_t index = item.index();
        int64_t dim = item.value().getSExtValue();
        if (dim < 0 || dim > output_rank) {
          return emitOptionalError(location, "out of range broadcast dim");
        }
        if (is_broadcasted[dim]) {
          return emitOptionalError(location, "broadcast_dims has duplicates");
        }
        broadcast_shape[dim] = min_rank_ty.getDimSize(index);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 170.8K bytes
    - Viewed (0)
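
    The snippet above validates a list of broadcast dimensions against an output rank, rejecting indices that fall outside the output shape or appear more than once. A minimal standalone sketch of the same checks, using only the standard library; the function name and error strings are illustrative, not the TensorFlow API, and valid indices are taken as [0, output_rank):

      #include <cstdint>
      #include <optional>
      #include <string>
      #include <vector>

      // Returns an error message if any broadcast dim is out of range or
      // duplicated; returns std::nullopt when the list is valid.
      std::optional<std::string> ValidateBroadcastDims(
          const std::vector<int64_t>& dims, int64_t output_rank) {
        std::vector<bool> is_broadcasted(output_rank, false);
        for (int64_t dim : dims) {
          if (dim < 0 || dim >= output_rank)
            return "out of range broadcast dim";
          if (is_broadcasted[dim])
            return "broadcast_dims has duplicates";
          is_broadcasted[dim] = true;
        }
        return std::nullopt;
      }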
  2. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc

        const int64_t rank = type.getRank();
        for (const int64_t dim : batch_dimensions) {
          batch_dimensions_.axes.push_back(dim);
          batch_dimensions_.sizes.push_back(type.getDimSize(dim));
        }
    
        for (const int64_t dim : contracting_dimensions) {
          contracting_dimensions_.axes.push_back(dim);
          contracting_dimensions_.sizes.push_back(type.getDimSize(dim));
        }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 154.9K bytes
    - Viewed (0)
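
    Here the pass records, for each batch and contracting dimension of a dot_general operand, both the axis index and the corresponding extent read from the operand type. A hedged standalone sketch of that bookkeeping; the struct and the plain-vector shape representation are simplifications, not the actual MLIR types used in legalize_hlo.cc:

      #include <cstdint>
      #include <vector>

      // Simplified stand-in for the pass's per-operand dimension bookkeeping.
      struct DimensionVector {
        std::vector<int64_t> axes;   // which dimensions participate
        std::vector<int64_t> sizes;  // their extents in the operand shape
      };

      // Collects axis/size pairs for the given dimension indices of a shape.
      DimensionVector CollectDimensions(const std::vector<int64_t>& shape,
                                        const std::vector<int64_t>& dimensions) {
        DimensionVector result;
        for (int64_t dim : dimensions) {
          result.axes.push_back(dim);
          result.sizes.push_back(shape[dim]);
        }
        return result;
      }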
  3. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc

      llvm::SmallVector<int64_t, 4> dims;
      dims.reserve(lhs.getRank());
      for (auto dim : llvm::zip(lhs.getShape(), rhs.getShape())) {
        int64_t lhs_dim = std::get<0>(dim);
        if (lhs_dim == std::get<1>(dim)) {
          dims.push_back(lhs_dim);
        } else {
          dims.push_back(ShapedType::kDynamic);
        }
      }
      return tensorflow::GetTypeFromTFTensorShape(
          dims, GetElementTypeFromOperand(lhs, rhs));
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 08 07:28:49 UTC 2024
    - 134.1K bytes
    - Viewed (0)
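
    This helper walks two shapes of equal rank in lockstep and keeps a dimension only where both sides agree, falling back to the dynamic-size sentinel on any mismatch. A minimal sketch of the same merge outside MLIR, using -1 in place of ShapedType::kDynamic purely for illustration:

      #include <cassert>
      #include <cstdint>
      #include <vector>

      constexpr int64_t kDynamic = -1;  // stand-in for ShapedType::kDynamic

      // Merges two shapes of equal rank: equal extents are kept, anything
      // else becomes dynamic.
      std::vector<int64_t> MergeShapes(const std::vector<int64_t>& lhs,
                                       const std::vector<int64_t>& rhs) {
        assert(lhs.size() == rhs.size());
        std::vector<int64_t> dims;
        dims.reserve(lhs.size());
        for (size_t i = 0; i < lhs.size(); ++i) {
          dims.push_back(lhs[i] == rhs[i] ? lhs[i] : kDynamic);
        }
        return dims;
      }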
  4. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc

      if (rank == 1) {
        int64_t dim0 = input_ty.getDimSize(0);
        if (dim0 != ShapedType::kDynamic && dim0 != 4 && dim0 != 2)
          return op.emitOpError("requires 1D input of size 4 or size 2");
      }
    
      if (rank == 2) {
        int64_t dim0 = input_ty.getDimSize(0);
        if (dim0 != ShapedType::kDynamic && dim0 != 4)
          return op.emitOpError(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 146.7K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/prepare-composite-functions-tf.mlir

    "tfdtype$DT_FLOAT", "tfdtype$DT_FLOAT"], Tout = ["tfdtype$DT_FLOAT", "tfdtype$DT_FLOAT", "tfdtype$DT_FLOAT", "tfdtype$DT_FLOAT", "tfdtype$DT_FLOAT"], _output_shapes = ["tfshape$dim { size: 9 } dim { size: 10 }", "tfshape$dim { size: -1 } dim { size: 9 } dim { size: 10 }", "tfshape$dim { size: 8 } dim { size: 10 }", "tfshape$dim { size: 8 } dim { size: 10 }", "tfshape$"], _read_only_resource_inputs = [], config = "", config_proto = "\0A\07\0A\03CPU\10\01\0A\07\0A\03GPU\10\002\02J\008\01", device = "", executor_type...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 122.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

    // Returns true if the n-th operand is ranked and has rank dim.
    class TFL_OperandHasKnownRank<int n, int dim> : And<[
      CPred<"$_op.getOperand(" # n # ").getType().isa<RankedTensorType>()">,
      CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>().getRank() == "
        # dim>]>;
    
    // True if operand n is ranked and has a rank > dim.
    class TFL_OperandIsRankedAndHasDimPred<int n, int dim> : And<[
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
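
    These TableGen classes expand into C++ predicates that first confirm an operand's type is ranked and then compare its rank against a constant. Roughly, the generated checks behave like this hedged sketch, written against plain values rather than mlir::Value, so the types and helper names are illustrative only:

      #include <cstdint>
      #include <optional>
      #include <vector>

      // Stand-in for an operand type: a ranked shape, or nothing if unranked.
      using MaybeShape = std::optional<std::vector<int64_t>>;

      // Mirrors the TFL_OperandHasKnownRank idea: ranked and rank == dim.
      bool HasKnownRank(const MaybeShape& shape, int64_t dim) {
        return shape.has_value() &&
               static_cast<int64_t>(shape->size()) == dim;
      }

      // Mirrors the "ranked and rank > dim" flavour of the predicate.
      bool IsRankedAndRankExceeds(const MaybeShape& shape, int64_t dim) {
        return shape.has_value() &&
               static_cast<int64_t>(shape->size()) > dim;
      }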
  7. tensorflow/compiler/mlir/lite/ir/tfl_ops.cc

                                   << output_rank << ", got " << operand_rank;
    
        for (int64_t dim = 0; dim < output_rank; ++dim) {
          const int64_t operand_dim_size = operand_type.getDimSize(dim);
          const int64_t result_dim_size = result_dim_sizes[dim];
    
          if (dim == axis) {
            if (ShapedType::isDynamic(operand_dim_size) ||
                ShapedType::isDynamic(result_dim_size)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 169.2K bytes
    - Viewed (0)
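
    The loop shown here compares each operand dimension against the expected result dimension, with the axis dimension and dynamic sizes handled as special cases. A simplified sketch of that shape-compatibility walk; plain vectors replace ShapedType, the dynamic sentinel is an assumption, and the axis dimension is simply skipped here rather than checked the way the op does:

      #include <cstdint>
      #include <vector>

      constexpr int64_t kDynamic = -1;  // stand-in for a dynamic dimension

      // Returns true when operand dims match the expected result dims on every
      // dimension except `axis`; dynamic sizes are never treated as mismatches.
      bool DimsCompatible(const std::vector<int64_t>& operand_dims,
                          const std::vector<int64_t>& result_dims, int64_t axis) {
        for (size_t dim = 0; dim < result_dims.size(); ++dim) {
          if (static_cast<int64_t>(dim) == axis) continue;  // handled separately by the op
          const int64_t operand_dim_size = operand_dims[dim];
          const int64_t result_dim_size = result_dims[dim];
          if (operand_dim_size == kDynamic || result_dim_size == kDynamic) continue;
          if (operand_dim_size != result_dim_size) return false;
        }
        return true;
      }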
  8. tensorflow/c/c_api.cc

      static char empty;
      int64_t nelems = 1;
      std::vector<int64_t> dims;
      dims.reserve(shape.dims());
      for (int i = 0; i < shape.dims(); ++i) {
        dims.push_back(shape.dim_size(i));
        nelems *= shape.dim_size(i);
      }
      CHECK_EQ(nelems, 0);
      return TF_NewTensor(
          dtype, reinterpret_cast<const int64_t*>(dims.data()), shape.dims(),
          reinterpret_cast<void*>(&empty), 0, [](void*, size_t, void*) {}, nullptr);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 03:35:10 UTC 2024
    - 102.3K bytes
    - Viewed (0)
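
    This C API helper builds a dims vector from a TensorShape while multiplying the extents together, and the CHECK_EQ(nelems, 0) guard shows the path is only meant for tensors with zero elements. A small sketch of the same element-count accumulation over a shape, with a plain std::vector input and an illustrative function name:

      #include <cstdint>
      #include <vector>

      // Multiplies all extents of a shape; an empty shape yields 1 (a scalar),
      // and any zero extent yields 0, the only case the snippet's guard accepts.
      int64_t NumElements(const std::vector<int64_t>& shape) {
        int64_t nelems = 1;
        for (int64_t dim_size : shape) {
          nelems *= dim_size;
        }
        return nelems;
      }

    For instance, a shape of {2, 0, 3} gives 0 elements and would pass the guard, while {2, 3} gives 6 and would not.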
  9. tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc

          TensorType dim_type = input_type.cloneWith({static_cast<int64_t>(1)},
                                                     rewriter.getI32Type());
          ArrayRef<int32_t> dims(dim_to_expand);
          auto dim_attr = DenseIntElementsAttr::get(dim_type, dims);
          auto dim = rewriter.create<arith::ConstantOp>(op.getLoc(), dim_attr);
    
          input_shape.insert(input_shape.begin() + dim_to_expand, 1);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 09:00:19 UTC 2024
    - 99.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir

    // CHECK: "tfl.batch_matmul"(%[[ARG]], %[[QCONST_0]]) <{adj_x = false, adj_y = true}>
    
    // -----
    
    // Tests static range quantized dot_general with wrong batch dims
    
    func.func @dot_general_upstream_srq_too_many_batches(%arg0: tensor<1x1x1x2x3x4x!quant.uniform<i8:f32, 1.000000e+0>>) -> tensor<1x1x1x2x3x5x!quant.uniform<i8:f32, 4.000000e+0>> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 106.2K bytes
    - Viewed (0)