Results 1 - 5 of 5 for output_model (0.27 sec)

  1. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

      const uint8_t* buffer = builder.GetBufferPointer();
      const Model* output_model = GetModel(buffer);
      ASSERT_TRUE(output_model);
    
      // Nothing should change.
      ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size());
      for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
           subgraph_idx++) {
        const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
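
    The test above reads a serialized model straight back out of the FlatBufferBuilder. A minimal sketch of that read-back pattern, assuming only the generated TFLite schema header; ModelRoundTripsIntact is an illustrative name, and the verifier step is an addition not shown in the snippet:

        #include "flatbuffers/flatbuffers.h"
        #include "tensorflow/lite/schema/schema_generated.h"

        // Returns true if the serialized model in `builder` parses and keeps
        // the same number of subgraphs as `original`.
        bool ModelRoundTripsIntact(const flatbuffers::FlatBufferBuilder& builder,
                                   const tflite::Model* original) {
          const uint8_t* buffer = builder.GetBufferPointer();
          // GetModel() does no validation, so verify the buffer first.
          flatbuffers::Verifier verifier(buffer, builder.GetSize());
          if (!tflite::VerifyModelBuffer(verifier)) return false;
          const tflite::Model* output_model = tflite::GetModel(buffer);
          return output_model->subgraphs()->size() ==
                 original->subgraphs()->size();
        }
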
  2. tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc

          output_builder.GetSize());
      tflite::ModelT output_model;
      output_fbm->GetModel()->UnPackTo(&output_model);
    
      // Extract output metadata
      std::map<std::string, std::string> output_metadata;
      for (const auto& metadata : output_model.metadata) {
        const auto& data = output_model.buffers[metadata->buffer]->data;
        output_metadata[metadata->name] = std::string(data.begin(), data.end());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 10 20:16:40 UTC 2024
    - 2.9K bytes
    - Viewed (0)
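
    The snippet above uses the FlatBuffers object API: UnPackTo deep-copies the read-only tflite::Model into a mutable tflite::ModelT whose metadata entries index into the model's buffer table. A minimal sketch of that extraction, assuming only the generated schema header; ExtractMetadata is an illustrative name:

        #include <map>
        #include <string>

        #include "tensorflow/lite/schema/schema_generated.h"

        // Copies every metadata entry of `model` into name -> payload form.
        std::map<std::string, std::string> ExtractMetadata(
            const tflite::Model* model) {
          tflite::ModelT unpacked;
          model->UnPackTo(&unpacked);  // Deep copy into owned C++ objects.
          std::map<std::string, std::string> metadata;
          for (const auto& entry : unpacked.metadata) {
            // Each entry names a buffer; the payload bytes live there.
            const auto& data = unpacked.buffers[entry->buffer]->data;
            metadata[entry->name] = std::string(data.begin(), data.end());
          }
          return metadata;
        }
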
  3. tensorflow/compiler/mlir/lite/python/converter_python_api.cc

          FromTocoDataTypeToTflitToTensorType(output_data_type);
    
      std::string output_model;
      const absl::string_view input_model_buffer(buf, length);
      auto status = mlir::lite::QuantizeModel(
          input_model_buffer, input_type, output_type, inference_tensor_type,
          /*operator_names=*/{}, disable_per_channel, fully_quantize, output_model,
          enable_numeric_verify, enable_whole_model_verify,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 19.2K bytes
    - Viewed (0)
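
    The call above wraps the raw input model in a non-owning absl::string_view and receives the quantized model through a std::string out-parameter. A sketch of just that buffer-handling pattern; ProcessModel and ConvertBuffer are hypothetical stand-ins, not the real mlir::lite::QuantizeModel signature:

        #include <cstddef>
        #include <string>

        #include "absl/status/status.h"
        #include "absl/strings/string_view.h"

        // Hypothetical stand-in for mlir::lite::QuantizeModel: consumes a
        // non-owning view of the input, writes the result into `output`.
        absl::Status ProcessModel(absl::string_view input, std::string& output) {
          if (input.empty())
            return absl::InvalidArgumentError("empty model buffer");
          output.assign(input.data(), input.size());  // Placeholder transform.
          return absl::OkStatus();
        }

        absl::Status ConvertBuffer(const char* buf, size_t length,
                                   std::string& out) {
          // string_view does not copy; it must not outlive `buf`.
          const absl::string_view input_model_buffer(buf, length);
          return ProcessModel(input_model_buffer, out);
        }
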
  4. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

                                              output_buffer_);
      EXPECT_THAT(status, Eq(kTfLiteOk));
    
      const Model* output_model = GetModel(output_buffer_.data());
      ASSERT_TRUE(output_model);
    }
    
    TEST_P(QuantizeConvModelTest, SkipUnspecifiedLayer) {
      auto status = QuantizeModel(&model_, TensorType_FLOAT32, TensorType_FLOAT32,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
    - Viewed (0)
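
    SkipUnspecifiedLayer above is a value-parameterized googletest case (TEST_P), so the same body runs once per instantiated parameter. A minimal sketch of that pattern, assuming only googletest/googlemock; the fixture, test name, and the parameter standing in for QuantizeModel's return status are all illustrative:

        #include <gmock/gmock.h>
        #include <gtest/gtest.h>

        #include "tensorflow/lite/c/c_api_types.h"  // TfLiteStatus, kTfLiteOk

        using ::testing::Eq;

        // Illustrative fixture: the parameter plays the role of the status
        // that QuantizeModel() returns in the real test.
        class QuantizeStatusTest
            : public ::testing::TestWithParam<TfLiteStatus> {};

        TEST_P(QuantizeStatusTest, StatusIsOk) {
          TfLiteStatus status = GetParam();
          EXPECT_THAT(status, Eq(kTfLiteOk));
        }

        // Runs the body once per value listed here.
        INSTANTIATE_TEST_SUITE_P(All, QuantizeStatusTest,
                                 ::testing::Values(kTfLiteOk));
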
  5. RELEASE.md

        *   Added an `output_mode` argument to the `Discretization` and `Hashing`
            layers with the same semantics as other preprocessing layers. All
            categorical preprocessing layers now support `output_mode`.
        *   All preprocessing layer output will follow the compute dtype of a
            `tf.keras.mixed_precision.Policy`, unless constructed with
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 730.3K bytes
    - Viewed (0)