- Sort Score
- Results 10 results
- Languages All
Results 41 - 50 of 428 for _xla (0.04 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
# Check the converted model in the XLA opset. quantization_options = quant_opts_pb2.QuantizationOptions( quantization_method=quant_opts_pb2.QuantizationMethod( preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8 ), tags=tags, signature_keys=[signature_key], op_set=quant_opts_pb2.XLA, enable_two_input_tensors=not use_kernel, )
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_passes.td
"quant::QuantizationDialect", "shape::ShapeDialect", "sparse_tensor::SparseTensorDialect", "stablehlo::StablehloDialect" ]; } def LegalizeTFCollective : Pass<"xla-legalize-tf-collective", "ModuleOp"> { let summary = "Legalize TF/XLA collective ops (TensorFlow dialect) to the HLO dialect"; let constructor = "mlir::mhlo::CreateLegalizeTFCollectivePass()";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 17:44:14 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/tf_stablehlo_pass.cc
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/mlir_hlo/mhlo/IR/register.h" #include "xla/mlir_hlo/mhlo/transforms/passes.h" #include "xla/mlir_hlo/mhlo/transforms/rewriters.h" #include "xla/mlir_hlo/mhlo/utils/type_conversion.h" namespace mlir { namespace odml { class TFToMhloPass
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 7.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compiler_options_util.h
#include "tensorflow/compiler/jit/xla_platform_info.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" namespace tensorflow { // Returns created options for the XLA compiler. XlaCompiler::Options GenerateCompilerOptions( const DeviceCompiler<xla::LocalExecutable, xla::LocalClient>& xla_device_compiler,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Dec 29 01:41:20 UTC 2023 - 2.7K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/BUILD
"@local_tsl//tsl/platform:status", "@local_xla//xla/stream_executor", "@local_xla//xla/stream_executor:executor_cache", "@local_xla//xla/stream_executor:host_memory_allocation", "@local_xla//xla/stream_executor:memory_allocation", "@local_xla//xla/stream_executor:platform", "@local_xla//xla/stream_executor:stream_executor_common", ], ) cc_library(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 00:27:07 UTC 2024 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/jit/device_compilation_cache.h
#include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" #include "tensorflow/core/platform/mutex.h" namespace tensorflow { namespace device_compilation_cache_internal { template <typename ExecutableType> int64_t ExecutableSize(const ExecutableType* executable) { return 0; } template <> inline int64_t ExecutableSize<xla::LocalExecutable>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Oct 12 08:49:52 UTC 2023 - 8.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/xla_sharding_util_test.cc
#include <string> #include <gtest/gtest.h> #include "mlir/Support/LogicalResult.h" // from @llvm-project #include "xla/xla_data.pb.h" inline constexpr llvm::StringRef kXlaShardingAttrName = "_XlaSharding"; namespace tensorflow { namespace { TEST(DecodeShardingAttributeTest, CheckInvalidString) { xla::OpSharding sharding; EXPECT_TRUE(DecodeShardingAttribute("", sharding).succeeded());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 1.8K bytes - Viewed (0) -
tensorflow/compiler/aot/codegen_test_h.golden
::xla::cpu_function_runtime::BufferInfo(::xla::cpu_function_runtime::EncodedBufferInfo{386ULL, 1U, ~0U}), ::xla::cpu_function_runtime::BufferInfo(::xla::cpu_function_runtime::EncodedBufferInfo{5ULL, ~0U, ~0U}), ::xla::cpu_function_runtime::BufferInfo(::xla::cpu_function_runtime::EncodedBufferInfo{386ULL, 2U, ~0U}), ::xla::cpu_function_runtime::BufferInfo(::xla::cpu_function_runtime::EncodedBufferInfo{5ULL, ~0U, ~0U}),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 01:20:01 UTC 2024 - 16.6K bytes - Viewed (0) -
tensorflow/compiler/aot/codegen.cc
#include "tensorflow/compiler/tf2xla/tf2xla_util.h" #include "xla/cpu_function_runtime.h" #include "xla/service/compiler.h" #include "xla/service/cpu/buffer_info_util.h" #include "xla/shape_util.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/lib/core/errors.h" namespace tensorflow { namespace tfcompile { namespace { using BufferInfo = xla::cpu_function_runtime::BufferInfo; bool IsAlpha(char c) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 01:20:01 UTC 2024 - 36.8K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_compile_util.cc
#include "tensorflow/core/platform/status.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace tensorflow { using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; Status CompileToPjRtLoadedExecutable( const DeviceBase* device, const XlaPlatformInfo& platform_info, const NameAttrList& function, const std::vector<XlaCompiler::Argument>& args,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.8K bytes - Viewed (0)