Results 1 - 10 of 250 for "more" (0.04 sec)
tensorflow/compiler/mlir/lite/tf_tfl_passes.h
void AddPostQuantizationStableHloToTfPasses(
    const mlir::TFL::PassConfig& pass_config,
    mlir::OpPassManager& pass_manager);

// This is the early part of the conversion in isolation. This enables a caller
// to inject more information in the middle of the conversion before resuming it
// (like freezing variables for example).
void AddPreVariableFreezingTFToTFLConversionPasses(
    const mlir::TFL::PassConfig& pass_config,
Last Modified: Thu Feb 01 06:14:07 UTC 2024 - 4.1K bytes
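A minimal sketch of the calling pattern the comment describes (run the early part of the conversion, let the caller inject work, then resume); the enclosing tensorflow:: namespace and the PassConfig include path are assumptions, not shown in the snippet:

#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"
#include "tensorflow/compiler/mlir/lite/tf_tfl_passes.h"

// Sketch: populate only the early (pre-variable-freezing) part of the
// TF -> TFLite conversion, so the caller can inject extra work (for example,
// freezing variables) before appending the rest of the pipeline.
// The tensorflow:: namespace and include paths are assumptions.
void BuildEarlyConversion(const mlir::TFL::PassConfig& pass_config,
                          mlir::OpPassManager& pass_manager) {
  tensorflow::AddPreVariableFreezingTFToTFLConversionPasses(pass_config,
                                                            pass_manager);
  // ... caller-injected passes would be added to `pass_manager` here ...
}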
tensorflow/compiler/mlir/tensorflow/utils/dump_graph.h
#include <optional>
#include <string>

#include "mlir/IR/OperationSupport.h"  // from @llvm-project
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/status.h"

namespace tensorflow {

struct MlirDumpConfig;

// Dumps 'graph_def' to a file, as textual IR. Returns the file name chosen.
//
Last Modified: Sat Dec 24 09:43:29 UTC 2022 - 2.6K bytes
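The dumping function itself is truncated out of the snippet, so the sketch below only illustrates the behaviour described in the comment; DumpGraphDefAsTextualIR is a hypothetical stand-in name, not the real entry point in dump_graph.h:

#include <string>

#include "tensorflow/core/framework/graph.pb.h"

// Hypothetical stand-in for the truncated declaration above: per the comment,
// the real function writes `graph_def` to a file as textual MLIR and returns
// the file name it chose. Name and signature here are illustrative only.
std::string DumpGraphDefAsTextualIR(const std::string& name,
                                    const tensorflow::GraphDef& graph_def);

void DebugDump(const tensorflow::GraphDef& graph_def) {
  std::string path = DumpGraphDefAsTextualIR("my_graph", graph_def);
  // `path` now holds the file name the dumper chose.
  (void)path;
}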
tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h
// This applies both the PassManagerCLOptions provided by MLIR and any
// TensorFlow-specific options.
//
// Note that this function should be in a more appropriate file, but it is
// unclear what a proper file would be, as no other functions would currently
// belong in that file either.
void applyTensorflowAndCLOptions(mlir::PassManager& pm,
Last Modified: Tue Jun 06 04:50:13 UTC 2023 - 4.8K bytes
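A minimal sketch of wiring this into a tool's pass-manager setup; the single-argument call assumes the parameters truncated above have defaults, and the function is assumed to live in the tensorflow:: namespace:

#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"

// Sketch: apply MLIR's PassManagerCLOptions plus the TensorFlow-specific
// options to a freshly built pass manager, before any passes are added.
// Assumes the trailing parameters cut off in the snippet have defaults.
void SetUpPassManager(mlir::MLIRContext& context) {
  mlir::PassManager pm(&context);
  tensorflow::applyTensorflowAndCLOptions(pm);
  // ... add passes and run them here ...
}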
tensorflow/compiler/mlir/lite/experimental/tac/common/targets.h
// Inference type.
constexpr char kInferenceType[] = "tac.inference_type";

constexpr char kSkipTargetAnnotation[] = "tac.skip_target_annotation";

// TODO(renjieliu): Add more inference types.
enum InferenceType {
  UNKNOWN = 0,
  FLOAT = 1,
  QUANTIZED_INT8 = 2,
  QUANTIZED_UINT8 = 3,
  HYBRID = 4
};

inline InferenceType GetInferenceTypeEnum(llvm::StringRef inference_type_str) {
Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 4.7K bytes
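A small sketch using only the names shown above; the enclosing mlir::TFL::tac namespace is an assumption, as is UNKNOWN being the fallback for unrecognized strings:

#include "llvm/ADT/StringRef.h"
#include "tensorflow/compiler/mlir/lite/experimental/tac/common/targets.h"

// Sketch: parse the value stored under the "tac.inference_type" attribute and
// check whether it asks for quantized kernels. The mlir::TFL::tac namespace
// is assumed; the enum values come from the snippet above.
bool WantsQuantizedKernels(llvm::StringRef inference_type_str) {
  using mlir::TFL::tac::InferenceType;
  InferenceType t = mlir::TFL::tac::GetInferenceTypeEnum(inference_type_str);
  return t == InferenceType::QUANTIZED_INT8 ||
         t == InferenceType::QUANTIZED_UINT8;
}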
tensorflow/c/eager/abstract_tensor_handle.h
#include <memory>

#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status.h"

namespace tensorflow {

// Abstract interface to a Tensor handle in either tracing or immediate
// execution mode.
class AbstractTensorHandle : public core::RefCounted {
Last Modified: Fri Mar 03 00:30:36 UTC 2023 - 3K bytes
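Because the handle is core::RefCounted, lifetimes are managed through Ref()/Unref() rather than delete; a minimal sketch of that convention, where everything other than the inherited Ref()/Unref() is illustrative:

#include "tensorflow/c/eager/abstract_tensor_handle.h"

// Sketch: a tiny RAII guard so a ref-counted handle is released even on an
// early return. AbstractTensorHandle inherits Ref()/Unref() from
// core::RefCounted; nothing else here is taken from the header.
struct HandleGuard {
  explicit HandleGuard(tensorflow::AbstractTensorHandle* h) : handle(h) {}
  ~HandleGuard() {
    if (handle != nullptr) handle->Unref();
  }
  tensorflow::AbstractTensorHandle* handle;
};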
tensorflow/c/checkpoint_reader.h
#include <memory>
#include <string>

#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tensorflow/core/util/tensor_slice_reader.h"

namespace tensorflow {
namespace checkpoint {

class TensorSliceReader;
Last Modified: Thu Oct 12 08:49:52 UTC 2023 - 3.1K bytes
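A hedged sketch of reading one tensor from a checkpoint; the CheckpointReader constructor and the HasTensor/GetTensor methods are recalled from the class rather than shown in the snippet, so treat their signatures as assumptions:

#include <memory>
#include <string>

#include "tensorflow/c/checkpoint_reader.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/core/framework/tensor.h"

// Sketch: open a checkpoint and fetch a named tensor. The constructor and
// HasTensor/GetTensor signatures are assumptions, not shown in the snippet.
std::unique_ptr<tensorflow::Tensor> ReadOne(const std::string& ckpt_path,
                                            const std::string& name) {
  TF_Status* status = TF_NewStatus();
  tensorflow::checkpoint::CheckpointReader reader(ckpt_path, status);
  std::unique_ptr<tensorflow::Tensor> out;
  if (TF_GetCode(status) == TF_OK && reader.HasTensor(name)) {
    reader.GetTensor(name, &out, status);
  }
  TF_DeleteStatus(status);
  return out;  // empty if the checkpoint or tensor could not be read
}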
tensorflow/compiler/mlir/lite/experimental/tac/hardwares/simple_hardware.h
// The larger the value is, the more preferable the hardware is over CPU.
// If the value is > 1, the hardware has an advantage over the CPU.
// If the value is < 1, the CPU is preferred.
// Specifying 10.0 means the hardware is 10x faster than the CPU.
// The value should be > 0.
// TODO(renjieliu): Consider adding an interface for more detailed customization,
Last Modified: Wed Jul 21 01:22:53 UTC 2021 - 2.5K bytes
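A small worked illustration of this scale, kept deliberately independent of the TAC API (how TAC actually folds the value into its cost model is not shown in the snippet):

// Worked example of the scale described above (plain arithmetic, no TAC API):
// an advantage value of 10.0 means an op estimated at 50 units of CPU cost is
// expected to cost about 50 / 10.0 = 5 units on the hardware.
constexpr float kCpuCost = 50.0f;
constexpr float kAdvantageOverCpu = 10.0f;  // > 1: hardware preferred over CPU
constexpr float kHardwareCost = kCpuCost / kAdvantageOverCpu;  // 5.0f
static_assert(kHardwareCost < kCpuCost,
              "an advantage > 1 implies the op is cheaper than on CPU");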
tensorflow/compiler/mlir/tensorflow/translate/mlir_import_options.h
// TODO(jpienaar): This file and class are confusingly named. This seems to be
// a SavedModel-only import options file that exposes a subset of the
// GraphImportConfig options, but the naming would make one think it is more
// general.
struct MLIRImportOptions {
  // If true, functionalize the input graph before importing it into MLIR.
  bool upgrade_legacy = false;
Last Modified: Mon Mar 20 13:19:26 UTC 2023 - 2.5K bytes
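A minimal sketch of populating the struct before a SavedModel import; only upgrade_legacy appears in the snippet, so the remaining fields are left at their defaults, and the tensorflow:: namespace is an assumption:

#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_import_options.h"

// Sketch: request functionalization of legacy control flow before the
// SavedModel graph is imported into MLIR. Only `upgrade_legacy` is shown in
// the snippet; everything else keeps its default value.
tensorflow::MLIRImportOptions MakeImportOptions() {
  tensorflow::MLIRImportOptions options;
  options.upgrade_legacy = true;
  return options;
}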
tensorflow/c/eager/unified_api_testutil.h
    absl::Span<AbstractTensorHandle* const> inputs,
    std::vector<AbstractTensorHandle*>* params);

// A callable that takes tensor inputs and returns zero or more tensor outputs.
using Model = std::function<Status(AbstractContext*,
                                   absl::Span<AbstractTensorHandle* const>,
                                   absl::Span<AbstractTensorHandle*>)>;
Last Modified: Tue Feb 27 13:57:45 UTC 2024 - 4K bytes
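A hedged sketch of a function that would satisfy this Model callable, built only from the signature shown above; the identity-style body and the OkStatus() spelling are assumptions:

#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/unified_api_testutil.h"

// Sketch: an identity "model" matching the Model callable above. It takes a
// ref on the single input and hands it back as the single output. The
// OkStatus() spelling is an assumption about the Status type in use.
tensorflow::Status IdentityModel(
    tensorflow::AbstractContext* ctx,
    absl::Span<tensorflow::AbstractTensorHandle* const> inputs,
    absl::Span<tensorflow::AbstractTensorHandle*> outputs) {
  inputs[0]->Ref();      // the output aliases the input, so add a reference
  outputs[0] = inputs[0];
  return tensorflow::OkStatus();
}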
tensorflow/c/c_api.h
// will fail. If `prefix` is nullptr, the default prefixing behaviour takes
// place; see TF_AddGradients for more details.
//
// WARNING: This function does not yet support all the gradients that python
// supports. See
// https://www.tensorflow.org/code/tensorflow/cc/gradients/README.md
// for instructions on how to add more C++ gradients.
TF_CAPI_EXPORT void TF_AddGradientsWithPrefix(TF_Graph* g, const char* prefix,
Last Modified: Thu Oct 26 21:08:15 UTC 2023 - 82.3K bytes
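A hedged sketch of one call; the parameters after `prefix` are cut off in the snippet above, so the y/x/dx/dy arguments and the TF_Status used here are recalled from the C API and should be double-checked against c_api.h:

#include "tensorflow/c/c_api.h"

// Sketch: add gradient nodes for one output w.r.t. one input, scoping the new
// nodes under the name prefix "grads". The argument list after `prefix` is an
// assumption recalled from the C API, not shown in the truncated snippet.
void AddGradsFor(TF_Graph* graph, TF_Output y, TF_Output x, TF_Output* dy,
                 TF_Status* status) {
  TF_AddGradientsWithPrefix(graph, /*prefix=*/"grads",
                            /*y=*/&y, /*ny=*/1,
                            /*x=*/&x, /*nx=*/1,
                            /*dx=*/nullptr, status, dy);
}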