Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 11 - 15 of 15 for pushBack (0.13 sec)

  1. tensorflow/compiler/mlir/lite/utils/string_utils.cc

      if (len > max_length_ || data_.size() >= max_length_ - len)
        return absl::ResourceExhaustedError("Buffer overflow");
      data_.resize(data_.size() + len);
      memcpy(data_.data() + offset_.back(), str, len);
      offset_.push_back(offset_.back() + len);
      return absl::OkStatus();
    }
    
    int MiniDynamicBuffer::WriteToBuffer(char** buffer) {
      // Allocate sufficient memory to tensor buffer.
      int32_t num_strings = offset_.size() - 1;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:41:49 UTC 2024
    - 2.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

        output->quantization = std::make_unique<QuantizationParametersT>();
        input->quantization->min.push_back(0.0);
        output->quantization->min.push_back(0.0);
        input->quantization->max.push_back(6.0);
        output->quantization->max.push_back(6.0);
      }
      TensorType tensor_type_;
    };
    
    INSTANTIATE_TEST_SUITE_P(QuantizeConvModelTestInst, QuantizeConvModelTest,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc

      for (const auto& pass_registration : registry_->passes()) {
        MlirOptimizationPassState pass_state = pass_registration.pass->GetPassState(
            &device_set, config_proto, **graph, *flib_def);
        per_pass_state.push_back(pass_state);
        switch (pass_state) {
          case MlirOptimizationPassState::FallbackEnabled: {
            if (overall_state != MlirOptimizationPassState::Enabled)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 22:19:26 UTC 2024
    - 18.5K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

            // different between float and quantized tensors. So do those tests
            // separately in the test body without checking them here.
            used_tensors.push_back(i);
            return float_tensor;
          } else {
            // Otherwise, do additional checks for data type and buffer contents.
            const std::vector<uint8_t> quantized_buffer =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/python/converter_python_api.cc

      mlir::TFL::ErrorCollector* collector =
          mlir::TFL::ErrorCollector::GetErrorCollector();
      std::vector<std::string> collected_errors;
      for (const auto& error_data : collector->CollectedErrors()) {
        collected_errors.push_back(error_data.SerializeAsString());
      }
      collector->Clear();
      return collected_errors;
    }
    
    std::string FlatBufferFileToMlir(const std::string& model,
                                     bool input_is_filepath) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 19.2K bytes
    - Viewed (0)
Back to top