Results 1 - 10 of 15 for deim (0.28 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-composite-functions-tf.mlir

    "tfdtype$DT_FLOAT", "tfdtype$DT_FLOAT"], Tout = ["tfdtype$DT_FLOAT", "tfdtype$DT_FLOAT", "tfdtype$DT_FLOAT", "tfdtype$DT_FLOAT", "tfdtype$DT_FLOAT"], _output_shapes = ["tfshape$dim { size: 9 } dim { size: 10 }", "tfshape$dim { size: -1 } dim { size: 9 } dim { size: 10 }", "tfshape$dim { size: 8 } dim { size: 10 }", "tfshape$dim { size: 8 } dim { size: 10 }", "tfshape$"], _read_only_resource_inputs = [], config = "", config_proto = "\0A\07\0A\03CPU\10\01\0A\07\0A\03GPU\10\002\02J\008\01", device = "", executor_type...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 122.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc

        int64_t index = item.index();
        int64_t dim = item.value().getSExtValue();
        if (dim < 0 || dim > output_rank) {
          return emitOptionalError(location, "out of range broadcast dim");
        }
        if (is_broadcasted[dim]) {
          return emitOptionalError(location, "broadcast_dims has duplicates");
        }
        broadcast_shape[dim] = min_rank_ty.getDimSize(index);
        is_broadcasted[dim] = true;
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 170.8K bytes
    - Viewed (0)
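    The snippet above is cut off mid-loop. As a rough illustration of the validation pattern it shows, here is a self-contained sketch in plain C++ (no MLIR types; the name ValidateBroadcastDims and its signature are hypothetical) that checks each broadcast dimension is in range and not duplicated, then records the corresponding size from the lower-ranked operand.

    #include <cstdint>
    #include <optional>
    #include <string>
    #include <vector>

    // Hypothetical helper: validates broadcast_dims against an output rank and
    // fills broadcast_shape with the sizes taken from the lower-ranked operand.
    // Returns an error message on failure, std::nullopt on success. Assumes
    // min_rank_shape has at least as many entries as broadcast_dims.
    std::optional<std::string> ValidateBroadcastDims(
        const std::vector<int64_t>& broadcast_dims,
        const std::vector<int64_t>& min_rank_shape, int64_t output_rank,
        std::vector<int64_t>& broadcast_shape) {
      std::vector<bool> is_broadcasted(output_rank, false);
      broadcast_shape.assign(output_rank, 1);
      for (size_t index = 0; index < broadcast_dims.size(); ++index) {
        const int64_t dim = broadcast_dims[index];
        if (dim < 0 || dim >= output_rank)   // out-of-range broadcast dim
          return "out of range broadcast dim";
        if (is_broadcasted[dim])             // duplicate entries are invalid
          return "broadcast_dims has duplicates";
        broadcast_shape[dim] = min_rank_shape[index];
        is_broadcasted[dim] = true;
      }
      return std::nullopt;
    }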
  3. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc

      for (size_t i = 0; i < window_dimensions.size(); i++) {
        auto dim = window.add_dimensions();
        dim->set_size(window_dimensions[i]);
        if (!window_strides.empty()) {
          dim->set_stride(window_strides[i]);
        } else {
          dim->set_stride(1);
        }
        if (!padding.empty()) {
          dim->set_padding_low(padding[i].first);
          dim->set_padding_high(padding[i].second);
        } else {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 08 07:28:49 UTC 2024
    - 134.1K bytes
    - Viewed (0)
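    The snippet above assembles per-dimension window configuration, defaulting the stride to 1 and the padding to 0 when those vectors are empty. A minimal sketch of the same pattern, using a plain struct rather than the window proto from the source file (the WindowDim type and BuildWindow name are assumptions):

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for the window-dimension proto in the snippet.
    struct WindowDim {
      int64_t size = 0;
      int64_t stride = 1;
      int64_t padding_low = 0;
      int64_t padding_high = 0;
    };

    // Builds one WindowDim per entry; stride defaults to 1 and padding to 0
    // when the corresponding vectors are empty, mirroring the snippet.
    std::vector<WindowDim> BuildWindow(
        const std::vector<int64_t>& window_dimensions,
        const std::vector<int64_t>& window_strides,
        const std::vector<std::pair<int64_t, int64_t>>& padding) {
      std::vector<WindowDim> window;
      window.reserve(window_dimensions.size());
      for (size_t i = 0; i < window_dimensions.size(); ++i) {
        WindowDim dim;
        dim.size = window_dimensions[i];
        dim.stride = window_strides.empty() ? 1 : window_strides[i];
        if (!padding.empty()) {
          dim.padding_low = padding[i].first;
          dim.padding_high = padding[i].second;
        }
        window.push_back(dim);
      }
      return window;
    }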
  4. tensorflow/compiler/mlir/lite/ir/tfl_ops.cc

                                   << output_rank << ", got " << operand_rank;
    
        for (int64_t dim = 0; dim < output_rank; ++dim) {
          const int64_t operand_dim_size = operand_type.getDimSize(dim);
          const int64_t result_dim_size = result_dim_sizes[dim];
    
          if (dim == axis) {
            if (ShapedType::isDynamic(operand_dim_size) ||
                ShapedType::isDynamic(result_dim_size)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 169.2K bytes
    - Viewed (0)
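    The snippet above compares each operand dimension against the expected result sizes, with special handling along the concatenation axis where dynamic sizes cannot be checked. Since the snippet is truncated before the axis branch completes, the following is only a guessed simplification in plain C++, with -1 standing in for a dynamic dimension and the function name OperandShapeCompatible made up for illustration.

    #include <cstdint>
    #include <vector>

    constexpr int64_t kDynamicDim = -1;  // stand-in for a dynamic dimension

    // Hypothetical check: every non-axis dimension of the operand must match
    // the expected result size exactly; along `axis`, dynamic sizes on either
    // side are tolerated because they cannot be verified statically.
    bool OperandShapeCompatible(const std::vector<int64_t>& operand_shape,
                                const std::vector<int64_t>& result_dim_sizes,
                                int64_t axis) {
      for (int64_t dim = 0; dim < static_cast<int64_t>(operand_shape.size());
           ++dim) {
        const int64_t operand_dim_size = operand_shape[dim];
        const int64_t result_dim_size = result_dim_sizes[dim];
        if (dim == axis) {
          if (operand_dim_size == kDynamicDim || result_dim_size == kDynamicDim)
            continue;  // skip the check when either side is dynamic
        } else if (operand_dim_size != result_dim_size) {
          return false;
        }
      }
      return true;
    }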
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc

        // [[0, 0], [h_low, h_high], [w_low, w_high], [0, 0]]
        SmallVector<int32_t, 8> tfl_pad_values = {0, 0};  // For output feature dim.
        for (const int64_t padding_value : padding_values) {
          tfl_pad_values.push_back(CastI64ToI32(padding_value).value());
        }
        // For input feature dim.
        tfl_pad_values.push_back(0);
        tfl_pad_values.push_back(0);
    
        const auto input_tensor_type =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 09:00:19 UTC 2024
    - 99.8K bytes
    - Viewed (0)
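    The snippet above flattens spatial padding into the row-major layout [[0, 0], [h_low, h_high], [w_low, w_high], [0, 0]], with zero padding on the leading and trailing feature dimensions. A minimal sketch of that flattening in plain C++, using a narrowing cast in place of the file's CastI64ToI32 helper (the BuildTflPadValues name is hypothetical):

    #include <cstdint>
    #include <vector>

    // Hypothetical sketch: flattens {h_low, h_high, w_low, w_high} into the
    // [[0,0],[h_low,h_high],[w_low,w_high],[0,0]] layout from the snippet,
    // with zeros for the output and input feature dimensions.
    std::vector<int32_t> BuildTflPadValues(
        const std::vector<int64_t>& padding_values) {
      std::vector<int32_t> tfl_pad_values = {0, 0};  // output feature dim
      for (const int64_t padding_value : padding_values) {
        tfl_pad_values.push_back(static_cast<int32_t>(padding_value));
      }
      tfl_pad_values.push_back(0);  // input feature dim, low
      tfl_pad_values.push_back(0);  // input feature dim, high
      return tfl_pad_values;
    }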
  6. tensorflow/compiler/mlir/lite/transforms/optimize.cc

      // so we could cast safely here.
      auto type = mlir::cast<ShapedType>(value.getType());
      SmallVector<int> new_shape;
      if (type.hasStaticShape()) {
        for (int64_t dim : type.getShape().drop_back()) {
          new_shape.push_back(dim);
        }
      } else {
        new_shape.push_back(-1);
      }
      return builder.create<ReshapeOp>(
          value.getLoc(), value,
          builder.create<arith::ConstantOp>(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
    - Viewed (0)
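    The snippet above builds a reshape that drops the trailing dimension: for a statically shaped value it keeps every leading dimension, and otherwise falls back to a single -1 entry so the dimension is inferred. A minimal sketch of just the shape computation, without constructing any MLIR ops (DroppedLastDimShape is a made-up name):

    #include <cstdint>
    #include <vector>

    // Hypothetical sketch: computes the reshape target used above. For a
    // static shape, keep every dimension except the last; otherwise fall back
    // to a single -1 ("infer this dimension") entry.
    std::vector<int> DroppedLastDimShape(const std::vector<int64_t>& shape,
                                         bool has_static_shape) {
      std::vector<int> new_shape;
      if (has_static_shape) {
        for (size_t i = 0; i + 1 < shape.size(); ++i) {
          new_shape.push_back(static_cast<int>(shape[i]));
        }
      } else {
        new_shape.push_back(-1);
      }
      return new_shape;
    }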
  7. tensorflow/compiler/mlir/tensorflow/tests/tpu_rewrite.mlir

        // CHECK:      metadata
        // CHECK-SAME: args
        // CHECK-SAME: shape {\0A dim {\0A size: -1\0A }\0A dim {\0A size: -1\0A }\0A dim {\0A size: 3\0A }\0A }
        func.return %0: tensor<?x?x3xi32>
      }
      func.func @_func(%arg0: tensor<?x?x3xi32>) -> tensor<?x?x3xi32> {
        func.return %arg0 : tensor<?x?x3xi32>
      }
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 22:03:30 UTC 2024
    - 172.9K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/flatbuffer_export.cc

        shape.reserve(shape_ref.size());
        for (auto& dim : shape_ref) {
          // translate dynamic shapes from mlir to tfl values
          shape.push_back(
              dim == mlir::ShapedType::kDynamic ? 1 : static_cast<int>(dim));
          shape_signature.push_back(static_cast<int>(
              dim == mlir::ShapedType::kDynamic ? tensorflow::kTFDynamicSize
                                                : dim));
        }
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:41:49 UTC 2024
    - 164.5K bytes
    - Viewed (0)
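    The snippet above maps each MLIR dimension into two parallel vectors: the exported shape receives a placeholder of 1 for dynamic dimensions, while the shape signature keeps the dynamic marker. A self-contained sketch of that translation, with -1 standing in for both mlir::ShapedType::kDynamic and tensorflow::kTFDynamicSize (the TranslateShapes name is an assumption):

    #include <cstdint>
    #include <vector>

    constexpr int64_t kMlirDynamic = -1;     // stand-in for ShapedType::kDynamic
    constexpr int32_t kTflDynamicSize = -1;  // stand-in for kTFDynamicSize

    // Hypothetical sketch of the translation above: dynamic dimensions become
    // 1 in the concrete shape but keep a dynamic marker in the signature.
    void TranslateShapes(const std::vector<int64_t>& shape_ref,
                         std::vector<int32_t>& shape,
                         std::vector<int32_t>& shape_signature) {
      shape.reserve(shape_ref.size());
      shape_signature.reserve(shape_ref.size());
      for (int64_t dim : shape_ref) {
        shape.push_back(dim == kMlirDynamic ? 1 : static_cast<int32_t>(dim));
        shape_signature.push_back(
            dim == kMlirDynamic ? kTflDynamicSize : static_cast<int32_t>(dim));
      }
    }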
  9. tensorflow/compiler/mlir/tensorflow/tests/extract_outside_compilation.mlir

        // CHECK:         }, {
        // CHECK:           "tf_device.cluster"
        // CHECK:             %[[A:.+]] = "tf.OpA"
        // CHECK:             %[[A_SHARD:.+]] = "tf.XlaSpmdFullToShardShape"(%[[A]]) <{dim = -1 : i64, manual_sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []}> : (tensor<2x2xi64>) -> tensor<1x2xi64>
        // CHECK:             %[[B:.+]] = "tf._XlaHostComputeMlir"(%[[A_SHARD]])
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:59:10 UTC 2023
    - 129.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/translate/import_model.cc

        }
        if (c->Rank(s0) != c->Rank(s1)) {
          return false;
        }
        for (int i = 0; i < c->Rank(s0); ++i) {
          if (!c->Dim(s0, i).SameHandle(c->Dim(s1, i))) {
            int64_t val0 = c->Value(c->Dim(s0, i));
            int64_t val1 = c->Value(c->Dim(s1, i));
            // Negative value is treated as unknown so all negative values indicate
            // the same dimension.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 183.2K bytes
    - Viewed (0)
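    The snippet above compares two inferred shapes dimension by dimension, treating negative extracted values as unknown. The snippet is truncated before the comparison concludes, so the following is only a rough sketch over plain vectors (ShapesMatch is a made-up name, and the shape-handle identity check from the source is omitted), in which any pair of negative sizes is treated as the same unknown dimension.

    #include <cstdint>
    #include <vector>

    // Hypothetical sketch of the rank/dimension comparison above: shapes match
    // only if ranks agree and every dimension pair agrees, where two negative
    // sizes are both treated as the same "unknown" dimension.
    bool ShapesMatch(const std::vector<int64_t>& s0,
                     const std::vector<int64_t>& s1) {
      if (s0.size() != s1.size()) return false;  // rank mismatch
      for (size_t i = 0; i < s0.size(); ++i) {
        const int64_t val0 = s0[i];
        const int64_t val1 = s1[i];
        if (val0 < 0 && val1 < 0) continue;  // both unknown: treated as equal
        if (val0 != val1) return false;
      }
      return true;
    }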