- Sort by: Score
- Results per page: 10
- Languages: All
Results 21 - 30 of 146 for _xla (0.05 sec)
-
tensorflow/compiler/jit/xla_compile_on_demand_op.cc
#include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "xla/client/local_client.h" #include "xla/executable_run_options.h" #include "xla/hlo/ir/hlo_input_output_alias_config.h" #include "xla/pjrt/pjrt_client.h" #include "xla/pjrt/tf_pjrt_client.h" #include "xla/service/executable.h" #include "xla/service/gpu/gpu_executable_run_options.h" #include "tensorflow/core/framework/function.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 08:39:39 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/flags.cc
"Minimum number of operators in an XLA compilation. Ignored for " "operators placed on an XLA device or operators explicitly marked " "for compilation."), Flag("tf_xla_max_cluster_size", &mark_for_compilation_flags->tf_xla_max_cluster_size, "Maximum number of operators in an XLA compilation."), Flag( "tf_xla_ops_to_cluster",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 24.5K bytes - Viewed (0) -
tensorflow/compiler/aot/BUILD
"@local_xla//xla:xla_data_proto_cc", "@local_xla//xla/client:client_library", "@local_xla//xla/client:compile_only_client", "@local_xla//xla/client:xla_computation", "@local_xla//xla/service:compiler", "@local_xla//xla/service/cpu:buffer_info_util", "@local_xla//xla/service/cpu:cpu_compiler", "@local_xla//xla/stream_executor:platform_manager", ], ) tf_cc_test(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 11 16:13:05 UTC 2024 - 11.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/BUILD
"@local_tsl//tsl/platform:protobuf", "@local_xla//xla/client:client_library", "@local_xla//xla/client:compile_only_client", "@local_xla//xla/service/cpu:cpu_compiler", "@local_xla//xla/service/cpu:cpu_transfer_manager", "@local_xla//xla/stream_executor", "@local_xla//xla/stream_executor/host:host_platform", "@local_xla//xla/stream_executor/host:host_platform_id", ],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.h
#include "tensorflow/compiler/jit/variable_info.h" #include "tensorflow/compiler/jit/xla_tensor.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" #include "xla/service/shaped_buffer.h" #include "xla/stream_executor/device_memory_allocator.h" #include "tensorflow/core/framework/allocation_description.pb.h" #include "tensorflow/core/framework/tensor.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 11.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/decompose_resource_ops.td
$resource, (TF_SubOp (CreateTFReadVariableOp $src_op, $value, $resource), $value ), (CreateConstBoolAttrFalse) ) >; // This decomposition is only correct inside XLA as it ignores use_locking // attribute. def DecomposeResourceApplyGradientDescentOp : Pat< (TF_ResourceApplyGradientDescentOp:$src_op $resource, $alpha, $delta, BoolAttr:$_), (TF_AssignVariableOp $resource,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 22 19:47:48 UTC 2024 - 20.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
// The XlaDevice executes a TensorFlow graph using the XLA linear algebra // runtime. // // Operators assigned to an XlaDevice are compiled into XLA computations. // Tensors on an XlaDevice are thin wrappers around XLA ScopedShapedBuffers. // // XlaDevice is instantiated separately for each XLA backend (e.g., CPU or GPU), // under different names (e.g., XLA_CPU or XLA_GPU).
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_test.cc
EXPECT_EQ(exec_args.size(), 2); std::shared_ptr<xla::Literal> literal1 = *exec_args[0]->ToLiteralSync(); EXPECT_TRUE(xla::LiteralTestUtil::Equal( *literal1, xla::LiteralUtil::CreateR2<int32_t>({{1, 2, 3}}))); std::shared_ptr<xla::Literal> literal2 = *exec_args[1]->ToLiteralSync(); EXPECT_TRUE(xla::LiteralTestUtil::Equal( *literal2, xla::LiteralUtil::CreateR2<int32_t>({{4, 5, 6}}))); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/jit/get_compiler_ir.cc
#include "tensorflow/compiler/jit/xla_platform_info.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/executable_build_options.h" #include "xla/client/local_client.h" #include "xla/service/hlo_graph_dumper.h" #include "xla/status_macros.h" #include "xla/stream_executor/host/host_platform_id.h" #include "xla/stream_executor/platform.h" #include "tensorflow/core/common_runtime/eager/tensor_handle.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 19K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h
// eliminates the use of resource variables. // . Legalizes the operations to XLA HLO operations. // . Canonicalizes the XLA HLO operations. // // device_type: XLA JIT device to use for compilation such as "XLA_CPU_JIT", // "XLA_GPU_JIT" or "XLA_TPU_JIT". // use_tuple_args: when this is true, always create a tuple argument for the // entry computation.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 17:24:39 UTC 2024 - 10.4K bytes - Viewed (0)