- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 24 for xla_cpu_jit (0.18 sec)
-
tensorflow/compiler/mlir/tf2xla/api/v2/device_type.proto
syntax = "proto2"; package tensorflow.tf2xla.v2; // The requested device type to compile for. enum DeviceType { DEVICE_TYPE_UNSPECIFIED = 0; XLA_TPU_JIT = 1; XLA_CPU_JIT = 2; XLA_GPU_JIT = 3;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Sep 15 15:50:12 UTC 2023 - 204 bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util_test.cc
failed_legalization, arg_shapes, /*device_type=*/"XLA_TPU_JIT", /*use_tuple_args=*/true, /*enable_op_fallback=*/false, /*shape_determination_fns=*/{}, &compilation_result); EXPECT_FALSE(status.ok()); EXPECT_EQ(count.Delta("tf.DoesntExist", "Unknown"), 1); } TEST(CompileMlirUtil, CreatesPipeline) { OpPassManager pass_manager; llvm::StringRef device_type = "XLA_CPU_JIT";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 25 19:54:38 UTC 2024 - 9.7K bytes - Viewed (0) -
tensorflow/compiler/jit/BUILD
], deps = [ ":xla_cpu_device", ":xla_cpu_jit", "//tensorflow/compiler/plugin", ] + if_cuda_or_rocm([ ":xla_gpu_device", ":xla_gpu_jit", ]) + if_with_tpu_support([ ":xla_tpu_device", ":xla_tpu_jit", ]), alwayslink = 1, ) cc_library( name = "xla_cpu_jit", visibility = ["//visibility:public"], deps = [
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 61.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/tfxla_device_specific_transformations_cpu.mlir
// RUN: tf-opt "--tfxla-device-specific-transforms=device-type=XLA_CPU_JIT" -verify-diagnostics -split-input-file %s | FileCheck -dump-input=fail %s module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1399 : i32}} { // CHECK-LABEL: stateless_op func.func @stateless_op() -> tensor<i32> { // CHECK: %cst = "tf.Const"() <{value = dense<1> : tensor<i32>}> : () -> tensor<i32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 515 bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h
// . Legalizes the operations to XLA HLO operations. // . Canonicalizes the XLA HLO operations. // // device_type: XLA JIT device to use for compilation such as "XLA_CPU_JIT", // "XLA_GPU_JIT" or "XLA_TPU_JIT". // use_tuple_args: when this is true, always create a tuple argument for the // entry computation. // enable_op_fallback: when this is true, prefer tf2xla fallback kernels over // MLIR
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 17:24:39 UTC 2024 - 10.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/mlprogram.cc
pm.addPass(mlir::createCanonicalizerPass()); pm.addPass(mlir::createSymbolDCEPass()); pm.addPass(mlir::TF::CreateTFShapeInferencePass()); llvm::StringRef tf2xla_fallback_device_type = "XLA_CPU_JIT"; pm.addPass(mlir::mhlo::createLegalizeTFPass( /*legalize_chlo=*/true, tf2xla_fallback_device_type, /*prefer_tf2xla=*/false)); pm.addPass(mlir::TF::CreateStripTfAttributesPass());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 09 22:39:15 UTC 2024 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-include-tf2xla-fallback.mlir
// RUN: tf-opt "-xla-legalize-tf=use-tf2xla-fallback=true device-type=XLA_CPU_JIT" -verify-diagnostics %s | FileCheck --check-prefix SUPPORTED_FALLBACK_DEVICE %s // RUN: tf-opt "-xla-legalize-tf=use-tf2xla-fallback=true" %s | FileCheck --check-prefix UNSPECIFIED_FALLBACK_DEVICE %s // RUN: tf-opt "-xla-legalize-tf=use-tf2xla-fallback=true device-type=INVALID_DEVICE_TYPE" %s | FileCheck --check-prefix UNSUPPORTED_FALLBACK_DEVICE %s // We run this test four times:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 16 19:04:03 UTC 2023 - 3.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter_test.cc
explicit Tf2XlaRewriterTestPeer(mlir::Operation* op) : op_builder_(op), empty_rewriter_(op_builder_), tf2xla_rewriter_(op, empty_rewriter_, /*device_type=*/"XLA_CPU_JIT") {} absl::StatusOr<TupleOp> ImportXlaComputationIntoModule( XlaComputation& computation) { return tf2xla_rewriter_.ImportXlaComputation(computation); } private: OpBuilder op_builder_;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:16:07 UTC 2024 - 11.7K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_base_device.h
std::string device_name; // The index of the device. int device_ordinal = -1; // The name of the compilation device, also referred to as jit_device_type. // (e.g., "XLA_CPU_JIT"); std::string compilation_device_name; // A vector of ShapeDeterminationFn (i.e., a bundle of LayoutSelectionFn, // ShapeRepresentationFn). Each bundle describes how the on-host shapes of
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/tf_stablehlo_pass.cc
mhlo::PopulateLegalizeTfPatterns(context, &patterns); TF::PopulateTFLoweringBeforeHLOPatterns(context, &patterns); mhlo::Tf2XlaTypeConverter converter; mhlo::PopulateLegalizeTfWithTf2XlaPatterns( "XLA_CPU_JIT", patterns, context, converter, /*prefer_tf2xla=*/false); stablehlo::StablehloToHloTypeConverter hlo_converter; chlo::populateChloToHloPatterns(context, &hlo_converter, &patterns);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 7.5K bytes - Viewed (0)