Results 1 - 10 of 58 for num_elements (0.16 sec)

  1. tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc

        auto get_num_elements = [](RankedTensorType tensor) {
          int num_elements = 1;
          for (int i = 0; i < tensor.getRank(); ++i) {
            // Treat a dynamic dim as if its size were one.
            if (!tensor.isDynamicDim(i)) {
              num_elements *= tensor.getDimSize(i);
            }
          }
          return num_elements;
        };
    
        // If the op is the pass-through op with (3x) smaller output, the dequantize
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/tests/device_compiler_test_helper.cc

        TF_RETURN_IF_ERROR(session->Create(graph));
        RunOptions run_options;
    
        Tensor input_a = CreateInputTensor(shape, 0);
        Tensor input_b = CreateInputTensor(shape, shape.num_elements());
        Tensor input_c = CreateInputTensor(shape, 2 * shape.num_elements());
        TF_RETURN_IF_ERROR(session->Run(
            run_options,
            {std::make_pair("a", input_a), std::make_pair("b", input_b),
             std::make_pair("c", input_c)},
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Feb 09 08:24:16 UTC 2024
    - 6.2K bytes
    - Viewed (0)
  3. tensorflow/cc/experimental/base/tests/tensor_test.cc

      EXPECT_EQ(tensor.dtype(), dtype);
      EXPECT_EQ(*reinterpret_cast<typename TypeParam::type*>(tensor.data()), 42);
      EXPECT_EQ(tensor.num_bytes(), sizeof(typename TypeParam::type));
      EXPECT_EQ(tensor.num_elements(), 1);
    }
    
    template <typename T>
    class Construct1DTensorTest : public ::testing::Test {};
    TYPED_TEST_SUITE(Construct1DTensorTest, SimpleTypes);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 09:56:08 UTC 2024
    - 6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

        return {};
      }
    
      int64_t num_elements = value_shape.getNumElements();
      SmallVector<int64_t> new_shape;
      for (auto idx : llvm::reverse(llvm::seq<int32_t>(0, rhs_shape.getRank()))) {
        const int64_t rhs_dim = rhs_shape.getDimSize(idx);
        if (num_elements % rhs_dim != 0) {
          return {};
        }
        new_shape.push_back(rhs_dim);
        num_elements = num_elements / rhs_dim;
        if (num_elements == 1) break;
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/fold_broadcast_pass.cc

        operand_new_shape[dimensions[i]] = operand.getType().getDimSize(i);
      }
    
      llvm::SmallVector<ElementValueT, 16> new_values;
      auto num_elements = result_type.getNumElements();
      new_values.reserve(num_elements);
      auto operand_values = operand.getValues<ElementValueT>();
      for (int64_t i = 0; i < num_elements; ++i) {
        const int64_t operand_index =
            GetElementIndex(operand_new_shape, current_index);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.5K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc

        int num_elements_threshold = quant_options_.min_num_elements_for_weights();
        int num_elements = cast<ShapedType>(op.getType()).getNumElements();
        if (num_elements < num_elements_threshold) {
          op->emitRemark("Quantization is skipped because the op has ")
              << num_elements << " elements which is fewer than the threshold("
              << num_elements_threshold << " elements).";
          return failure();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 11.3K bytes
    - Viewed (0)
  7. tensorflow/cc/experimental/base/tests/tensorhandle_test.cc

      EXPECT_EQ(tensor.dtype(), dtype);
      EXPECT_EQ(*reinterpret_cast<typename TypeParam::type*>(tensor.data()), 42);
      EXPECT_EQ(tensor.num_bytes(), sizeof(typename TypeParam::type));
      EXPECT_EQ(tensor.num_elements(), 1);
    }
    
    template <typename T>
    class Construct1DTensorHandleTest : public ::testing::Test {};
    TYPED_TEST_SUITE(Construct1DTensorHandleTest, SimpleTypes);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 09:56:08 UTC 2024
    - 6.9K bytes
    - Viewed (0)
  8. tensorflow/cc/framework/gradient_checker.cc

        const int64_t x_size =
            x_shapes[x_idx].num_elements() * JacobianStride<X_T>::value;
        for (int y_idx = 0; y_idx < y_num; y_idx++) {
          // The number of columns is the number of elements in the y tensor
          // multiplied by the number of Jacobian entries needed to represent each
          // y type.
          const int64_t y_size =
              y_shapes[y_idx].num_elements() * JacobianStride<Y_T>::value;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 05:57:22 UTC 2024
    - 18.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc

          b_size->push_back(block_size[i]);
        }
      }
    }
    
    inline float GetSparsity(const int num_zeros, const int num_elements) {
      return (1.0 * num_zeros / num_elements);
    }
    
    float CalculateRandomSparsity(const ElementsAttr& attr,
                                  const ShapedType& type) {
      int num_elements = type.getNumElements();
      int num_zeros = 0;
    
      if (mlir::isa<FloatType>(type.getElementType())) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 16.1K bytes
    - Viewed (0)
  10. tensorflow/c/eager/c_api_test_util.cc

      return th;
    }
    
    TFE_TensorHandle* TestMatrixTensorHandle100x100(TFE_Context* ctx) {
      constexpr int64_t dims[] = {100, 100};
      constexpr int num_elements = dims[0] * dims[1];
      float data[num_elements];
      for (int i = 0; i < num_elements; ++i) {
        data[i] = 1.0f;
      }
      TF_Status* status = TF_NewStatus();
      TF_Tensor* t = TFE_AllocateHostTensor(ctx, TF_FLOAT, &dims[0],
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 22:37:46 UTC 2024
    - 23.5K bytes
    - Viewed (0)
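
For context, here is a minimal standalone sketch of the element-counting pattern in hit 1 (optimize_op_order.cc): count a shape's elements while treating any dynamic dimension as size one. The function name CountElements and the use of -1 to mark a dynamic dimension are assumptions of this sketch; the original lambda queries an MLIR RankedTensorType directly.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Counts the elements of a shape, skipping dynamic dims (marked as -1 here),
    // so each dynamic dim contributes a factor of one, as in the lambda above.
    int64_t CountElements(const std::vector<int64_t>& shape) {
      int64_t num_elements = 1;
      for (int64_t dim : shape) {
        if (dim >= 0) {
          num_elements *= dim;
        }
      }
      return num_elements;
    }

    int main() {
      // A {2, -1, 3} shape with one dynamic dim counts as 2 * 3 = 6 elements.
      assert(CountElements({2, -1, 3}) == 6);
      return 0;
    }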
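
Hit 4 (prepare_lifting.cc) factors a constant's flat element count into the trailing dimensions of the rhs shape, bailing out when the count is not divisible. The sketch below restates that loop over a plain std::vector; FactorIntoTrailingDims is a hypothetical name, and the result keeps the trailing-first order in which the original pushes dimensions.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Walks rhs_shape from its last dimension backwards, peeling each dim off
    // num_elements until it is fully factored. Returns {} if a dim does not divide.
    std::vector<int64_t> FactorIntoTrailingDims(int64_t num_elements,
                                                const std::vector<int64_t>& rhs_shape) {
      std::vector<int64_t> new_shape;
      for (auto it = rhs_shape.rbegin(); it != rhs_shape.rend(); ++it) {
        const int64_t rhs_dim = *it;
        if (num_elements % rhs_dim != 0) return {};
        new_shape.push_back(rhs_dim);
        num_elements /= rhs_dim;
        if (num_elements == 1) break;
      }
      return new_shape;
    }

    int main() {
      // 24 elements against a {5, 4, 3, 2} rhs shape factor as 24 = 2 * 3 * 4,
      // so the trailing-first result is {2, 3, 4}.
      assert((FactorIntoTrailingDims(24, {5, 4, 3, 2}) == std::vector<int64_t>{2, 3, 4}));
      return 0;
    }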
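
Hit 9 (dense_to_sparse.cc) defines sparsity as num_zeros / num_elements. Below is a small self-contained illustration of that ratio over a flat float buffer; the Sparsity helper is hypothetical, whereas the original counts zeros while iterating an ElementsAttr.

    #include <cassert>
    #include <vector>

    // Sparsity is the fraction of elements that are exactly zero.
    float Sparsity(const std::vector<float>& values) {
      if (values.empty()) return 0.0f;
      int num_zeros = 0;
      for (float v : values) {
        if (v == 0.0f) ++num_zeros;
      }
      return static_cast<float>(num_zeros) / static_cast<float>(values.size());
    }

    int main() {
      // Three zeros out of four elements gives a sparsity of 0.75.
      assert(Sparsity({0.0f, 0.0f, 0.0f, 1.5f}) == 0.75f);
      return 0;
    }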