Results 11 - 20 of 20 for TF1 (0.04 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/python/representative_dataset.py

        Raises:
          KeyError: If the set of input keys in the dataset samples doesn't match
          the set of expected input keys.
        """
        # When running in graph mode (TF1), tf.Tensor types should be converted to
        # numpy ndarray types to be compatible with `make_tensor_proto`.
        if not context.executing_eagerly():
          with session.Session() as sess:
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 14.2K bytes
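
    A minimal sketch of the conversion this snippet describes, using only
    public tf.compat.v1 APIs rather than the internal `context` and `session`
    modules shown above:

      import tensorflow as tf

      tf.compat.v1.disable_eager_execution()  # emulate TF1 graph mode

      sample = {"input": tf.constant([[1.0, 2.0], [3.0, 4.0]])}

      if not tf.executing_eagerly():
          with tf.compat.v1.Session() as sess:
              # Evaluate symbolic tf.Tensor values into numpy ndarrays so that
              # make_tensor_proto can consume them.
              sample = {key: sess.run(value) for key, value in sample.items()}

      proto = tf.make_tensor_proto(sample["input"])
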
  2. tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.py

        representative_dataset: rd.RepresentativeDataset,
    ) -> None:
      """Runs the representative dataset through a function for calibration.
    
      NOTE: This is intended to be run in graph mode (TF1).
    
      The function is identified by the SignatureDef.
    
      Args:
        sess: The Session object to run the function in.
        signature_def: A SignatureDef that identifies a function by specifying the
    - Last Modified: Fri May 31 05:32:11 UTC 2024
    - 27.4K bytes
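
    A loose sketch of what "running the representative dataset through a
    function identified by the SignatureDef" can look like in TF1 graph mode;
    the helper name and the shape of the dataset samples are assumptions, not
    the library's actual implementation:

      def run_calibration(sess, signature_def, representative_dataset):
          # Resolve the SignatureDef's output TensorInfos to graph tensors.
          fetches = {
              name: sess.graph.get_tensor_by_name(info.name)
              for name, info in signature_def.outputs.items()
          }
          # Each sample is assumed to map input keys to feed values.
          for sample in representative_dataset:
              feed_dict = {
                  sess.graph.get_tensor_by_name(signature_def.inputs[key].name): value
                  for key, value in sample.items()
              }
              # Running the fetches lets any calibration ops in the graph
              # record statistics as a side effect.
              sess.run(fetches, feed_dict=feed_dict)
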
  3. tensorflow/compiler/jit/flags.cc

      absl::call_once(flags_init, &AllocateAndParseFlags);
      return *jitrt_flags;
    }
    
    ConfigProto::Experimental::MlirBridgeRollout GetMlirBridgeRolloutState(
        std::optional<const ConfigProto> config_proto) {
      // TF1 graphs that do not override the Session's ConfigProto and TF2 graphs
      // can enable/disable the MLIR bridge via tf_mlir_enable_mlir_bridge.
      auto tf_mlir_enable_mlir_bridge =
          GetMlirCommonFlags()->tf_mlir_enable_mlir_bridge;
    - Last Modified: Wed Apr 17 18:52:57 UTC 2024
    - 24.5K bytes
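
    A hedged sketch of the two toggles this comment refers to; the flag and
    field names below (`tf.config.experimental.enable_mlir_bridge` and
    `ConfigProto.experimental.enable_mlir_bridge`) are taken from the public
    TensorFlow API and the ConfigProto definition:

      import tensorflow as tf

      # Global switch: used by TF2 programs and by TF1 graphs that do not
      # override the Session's ConfigProto.
      tf.config.experimental.enable_mlir_bridge()

      # A TF1 Session can carry the setting in its own ConfigProto instead.
      tf.compat.v1.disable_eager_execution()
      config = tf.compat.v1.ConfigProto()
      config.experimental.enable_mlir_bridge = True
      with tf.compat.v1.Session(config=config) as sess:
          pass
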
  4. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

          filter_shape: Sequence[int] = (2, 3, 3, 2),
          use_variable_for_filter=False,
      ) -> Tuple[core.Tensor, core.Tensor]:
        """Creates a basic convolution model.
    
        This is intended to be used for TF1 (graph mode) tests.
    
        Args:
          input_shape: Shape of the input tensor.
          filter_shape: Shape of the filter.
          use_variable_for_filter: Setting this to `True` makes the filter for the
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
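
    A sketch of the kind of "basic convolution model" such a TF1 (graph mode)
    test helper builds; the input shape here is an assumption chosen to match
    the default filter shape (2, 3, 3, 2), i.e. a 2x3 kernel mapping 3 input
    channels to 2 output channels:

      import numpy as np
      import tensorflow as tf

      tf.compat.v1.disable_eager_execution()  # TF1 graph mode

      input_shape = (1, 4, 4, 3)   # assumed NHWC input
      filter_shape = (2, 3, 3, 2)  # default from the snippet above

      x = tf.compat.v1.placeholder(tf.float32, shape=input_shape, name="input")
      filters = tf.constant(np.ones(filter_shape, dtype=np.float32))
      y = tf.nn.conv2d(x, filters, strides=[1, 1, 1, 1], padding="SAME")

      with tf.compat.v1.Session() as sess:
          out = sess.run(y, feed_dict={x: np.ones(input_shape, dtype=np.float32)})
          print(out.shape)  # (1, 4, 4, 2)
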
  5. tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.cc

      return status;
    }
    
    void CreateReplicatedClusteringPipeline(OpPassManager &pm,
                                            llvm::StringRef module_name) {
      // Since the internal bridge clustering passes are shared among TF1/TF2,
      // TF2-only passes should go here. However, this should be very rare and
      // new passes generally should go into the internal
      // AddReplicatedBridgeClusteringPipelinePasses.
    - Last Modified: Thu Mar 28 22:25:18 UTC 2024
    - 8.7K bytes
  6. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

      // or ambiguity in this unit-wise precision, our quantizer will raise an
      // error.
      repeated UnitWiseQuantizationSpec unit_wise_quantization_specs = 17;
    
      // (TF1 SavedModel only) Collection of tags identifying the MetaGraphDef
      // within the SavedModel to analyze. If not specified, ["serve"] is used.
      repeated string tags = 5;
    
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
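
    The `tags` field selects a MetaGraphDef the same way tags do when a TF1
    SavedModel is loaded; a short sketch (the path is hypothetical):

      import tensorflow as tf

      export_dir = "/tmp/my_tf1_saved_model"  # hypothetical path

      tf.compat.v1.disable_eager_execution()
      with tf.compat.v1.Session() as sess:
          # ["serve"] is the conventional default tag set, as noted above.
          meta_graph_def = tf.compat.v1.saved_model.load(sess, ["serve"], export_dir)
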
  7. tensorflow/c/experimental/saved_model/core/tf_saved_model_api.cc

        const std::string& directory,
        const absl::optional<std::unordered_set<std::string>>& tags,
        ImmediateExecutionContext* context, std::unique_ptr<TFSavedModelAPI>* out) {
      // TODO(bmzhao): Add support for loading a TF1 SavedModel.
      if (tags) {
        return errors::Unimplemented(
            "Loading saved models with explicit tags will be supported in the "
            "future");
      }
    
      SavedModelV2Bundle bundle;
    - Last Modified: Tue Feb 27 09:34:33 UTC 2024
    - 14.1K bytes
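
    For contrast, the Python loader does accept tags, which is how a TF1
    SavedModel (with its tagged MetaGraphDefs) can still be loaded; the paths
    below are hypothetical:

      import tensorflow as tf

      tf2_obj = tf.saved_model.load("/tmp/tf2_saved_model")
      tf1_obj = tf.saved_model.load("/tmp/tf1_saved_model", tags=["serve"])
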
  8. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

    from tensorflow.python.types import core
    
    _CalibrationMethod = qc.CalibrationOptions.CalibrationMethod
    
    
    # Test cases for Static Range Quantization.
    # Tries to run all test cases in both the graph mode (default in TF1) and the
    # eager mode (default in TF2) to ensure support for when TF2 is disabled.
    class StaticRangeQuantizationTest(quantize_model_test_base.QuantizedModelTest):
    
      @parameterized.parameters(
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
  9. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

    @test_util.run_all_in_graph_and_eager_modes
    class QuantizationOptionsTest(quantize_model_test_base.QuantizedModelTest):
      """Test cases regarding the use of QuantizationOptions proto.
    
      Run all test cases in both the graph mode (default in TF1) and the eager mode
      (default in TF2) to ensure support for when TF2 is disabled.
      """
    
      class SimpleModel(module.Module):
    
        def __init__(self):
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
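
    A minimal sketch of the pattern this docstring describes: the class-level
    decorator visible in the snippet runs every test method once in graph mode
    and once in eager mode (the test class and assertion are illustrative):

      import tensorflow as tf
      from tensorflow.python.framework import test_util

      @test_util.run_all_in_graph_and_eager_modes
      class TinyModelTest(tf.test.TestCase):

        def test_matmul_shape(self):
          y = tf.matmul(tf.ones((2, 3)), tf.ones((3, 4)))
          # self.evaluate works in both modes (Session.run vs. eager numpy).
          self.assertEqual(self.evaluate(y).shape, (2, 4))

      if __name__ == "__main__":
        tf.test.main()
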
  10. RELEASE.md

            checkpoint, please change your optimizer to
            `tf.keras.optimizers.legacy.XXX` (e.g.
            `tf.keras.optimizers.legacy.Adam`).
    *   **TF1 compatibility.** The new optimizer no longer supports TF1, so
        please use the legacy optimizer `tf.keras.optimizers.legacy.XXX`. We
        highly recommend migrating your workflow to TF2 for stable support
        and new features.
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 730.3K bytes
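
    A sketch of the swap this release note asks for, assuming a TensorFlow
    release where `tf.keras.optimizers.legacy` exists (2.11+):

      import tensorflow as tf

      model = tf.keras.Sequential([tf.keras.layers.Dense(1)])

      new_opt = tf.keras.optimizers.Adam(learning_rate=1e-3)            # TF2-only
      legacy_opt = tf.keras.optimizers.legacy.Adam(learning_rate=1e-3)  # TF1-compatible

      model.compile(optimizer=legacy_opt, loss="mse")
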