- Sort by: Score
- Results per page: 10
- Languages: All
Results 41 - 50 of 134 for _xla (0.04 sec)
-
tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.cc
std::vector<tpu::ShardingAndIndex>* arg_core_mapping, std::vector<std::vector<xla::Shape>>* per_core_arg_shapes, std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes, xla::CompileOnlyClient* client, XlaCompilationResult* compilation_result) { LOG_FIRST_N(INFO, 1) << "Compiling MLIR computation to XLA HLO using the " "Combined MLIR Tf2Xla Bridge.";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 20:29:34 UTC 2024 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc
llvm::cl::desc("Choose target opset."), llvm::cl::values( clEnumValN(OpSet::TF, "TF", "Uses TF ops that mimic quantization behavior"), clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"), clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED", "Uses TF Uniform Quantized ops"))}; }; llvm::StringRef InsertQuantizedFunctionsPass::GetFunctionLibrary(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 8.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util_test.cc
} absl::Status BuildHloFromGraph(Graph& graph, bool use_output_shapes) { xla::XlaBuilder builder( ::testing::UnitTest::GetInstance()->current_test_info()->name()); mlir::MLIRContext mlir_context; llvm::SmallVector<xla::XlaOp, 4> xla_params; std::vector<xla::XlaOp> returns(1); return BuildHloFromGraph(graph, builder, mlir_context, xla_params, returns,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 25 19:54:38 UTC 2024 - 9.7K bytes - Viewed (0) -
tensorflow/compiler/jit/tests/device_compiler_test_helper.h
} JitCompilationListener* listener() const { return listener_; } // Returns a test graph that will split into two XLA clusters (due to a node // with _XlaCompile = false). GraphDef GetTestGraph(const PartialTensorShape& input_shape); // Runs the graph using specified batch size both with and without XLA JIT // compilation. Returns an error if the results between the two do not match.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 09 08:24:16 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/jit/device_compiler_client.h
#define TENSORFLOW_COMPILER_JIT_DEVICE_COMPILER_CLIENT_H_ #include <optional> #include <string> #include <variant> #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/executable_build_options.h" namespace tensorflow { template <typename ExecutableType, typename ClientType> class DeviceCompilerClient { public: DeviceCompilerClient() = default;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.h
const std::vector<TensorShape>& arg_shapes, std::vector<tpu::ShardingAndIndex>* arg_core_mapping, std::vector<std::vector<xla::Shape>>* per_core_arg_shapes); // Compiles a serialized MLIR module into XLA HLO, generates all accompanying // metadata and stores them in CompilationResult. absl::StatusOr<XlaCompilationResult> LegalizeWithMlirBridge( const tpu::MlirToHloArgs& computation,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 20:29:34 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/jit/device_util.h
#include "absl/container/flat_hash_map.h" #include "absl/numeric/bits.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/status_macros.h" #include "xla/statusor.h" #include "tensorflow/core/framework/types.h" namespace tensorflow { namespace jit { class DeviceInfoCache; class DeviceSet;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 15 17:18:31 UTC 2024 - 7.1K bytes - Viewed (0) -
tensorflow/compiler/jit/tests/BUILD
"@com_google_absl//absl/strings", "@local_xla//xla/service:hlo_proto_cc", ], ) tf_cc_test( name = "device_compiler_serialize_test", srcs = [ "device_compiler_serialize_test.cc", ], tags = [ "config-cuda-only", "no_oss", # This test only runs with GPU. "requires-gpu-nvidia", "xla", ], deps = [ ":device_compiler_test_helper",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 27 18:00:18 UTC 2024 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/jit/encapsulate_xla_computations_pass.h
limitations under the License. ==============================================================================*/ // Rewrites computations generated by the xla.compile() Python code into // XlaLaunch nodes. // // xla.compile() does two main things: // a) marks operators that make up an XLA computation with the attribute // _xla_compile_id=XYZ, where XYZ is a unique key. // b) adds XlaClusterOutput nodes to represent outputs of the computation.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/legalize_tf_quant_test.cc
#include "llvm/ADT/StringRef.h" #include "mlir/Pass/Pass.h" // from @llvm-project #include "tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.h" #include "xla/client/client_library.h" #include "xla/shape.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 18:43:55 UTC 2024 - 7.2K bytes - Viewed (0)