Results 51 - 60 of 232 for _xla (0.04 sec)

  1. tensorflow/compiler/mlir/tf2xla/internal/utils/test_metadata_config.cc

    #include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
    #include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
    #include "xla/mlir_hlo/mhlo/IR/register.h"
    #include "xla/shape.h"
    #include "xla/translate/mhlo_to_hlo/type_to_shape.h"
    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
    #include "tsl/platform/errors.h"
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 13 23:59:33 UTC 2024
    - 3.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/xla_host_recv_device_context.h

    #ifndef TENSORFLOW_COMPILER_JIT_XLA_HOST_RECV_DEVICE_CONTEXT_H_
    #define TENSORFLOW_COMPILER_JIT_XLA_HOST_RECV_DEVICE_CONTEXT_H_
    
    #include "xla/shape.h"
    #include "xla/stream_executor/device_memory.h"
    #include "xla/stream_executor/stream.h"
    #include "tensorflow/core/framework/device_base.h"
    #include "tfrt/concurrency/async_value_ref.h"  // from @tf_runtime
    
    namespace tensorflow {
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 3.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/preprocess_op_weight_only.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-preprocess-op='target-opset=XLA quantization-method=weight_only enable-per-channel-quantization=false' | FileCheck --check-prefix PerTensor %s
    // RUN: tf-quant-opt %s -split-input-file -quant-preprocess-op='target-opset=XLA quantization-method=weight_only enable-per-channel-quantization=true' | FileCheck --check-prefix PerChannel %s
    
    module {
      // For XLA weight-only per-channel depthwise convolution, tensor shape should have
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 4.7K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/pjrt_tensor_buffer.h

    #include <memory>
    #include <utility>
    
    #include "xla/pjrt/pjrt_client.h"
    #include "tensorflow/core/framework/allocation_description.pb.h"
    #include "tensorflow/core/framework/tensor.h"
    
    namespace tensorflow {
    
    // PjRtTensorBuffer is derived from TensorBuffer, which holds a device memory
    // pointer so that legacy TF kernel can access it directly. PjRtTensorBuffer
    // also owns a PjRtBuffer for XLA kernel's usage.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 2K bytes
    - Viewed (0)
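    The comment excerpted above describes the pattern this header implements: a
    TensorBuffer subclass exposes a raw device pointer to legacy TF kernels while
    also owning a PjRtBuffer so the underlying XLA allocation stays alive. Below is
    a minimal sketch of that idea, using only the includes listed in this result;
    the class name, constructor shape, and member names are assumptions for
    illustration, not the actual header.

    #include <memory>
    #include <utility>

    #include "xla/pjrt/pjrt_client.h"
    #include "tensorflow/core/framework/allocation_description.pb.h"
    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    // Sketch only: holds a raw device pointer for legacy kernels and keeps the
    // backing xla::PjRtBuffer alive for XLA kernels.
    class PjRtTensorBufferSketch : public TensorBuffer {
     public:
      PjRtTensorBufferSketch(void* ptr, size_t size,
                             std::unique_ptr<xla::PjRtBuffer> pjrt_buffer)
          : TensorBuffer(ptr),
            size_(size),
            pjrt_buffer_(std::move(pjrt_buffer)) {}

      size_t size() const override { return size_; }
      TensorBuffer* root_buffer() override { return this; }
      void FillAllocationDescription(
          AllocationDescription* proto) const override {
        proto->set_requested_bytes(size_);
      }

     private:
      size_t size_;
      std::unique_ptr<xla::PjRtBuffer> pjrt_buffer_;  // owned for XLA's use
    };

    }  // namespace tensorflow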
  5. tensorflow/compiler/mlir/BUILD

            "@llvm-project//mlir:MlirOptLib",
            "@llvm-project//mlir:Support",
            "@llvm-project//mlir:Transforms",
            "@local_xla//xla/mlir/framework/ir:xla_framework",
            "@local_xla//xla/mlir/framework/transforms:passes",
            "@local_xla//xla/mlir_hlo:all_passes",
        ],
    )
    
    cc_library(
        name = "passes",
        visibility = [
            ":__subpackages__",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 22:19:26 UTC 2024
    - 9.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/defs.h

    // Provides definitions needed for use of the TensorFlow XLA
    // device.
    
    #ifndef TENSORFLOW_COMPILER_JIT_DEFS_H_
    #define TENSORFLOW_COMPILER_JIT_DEFS_H_
    
    namespace tensorflow {
    
    // Name of attribute used to tag operators for compilation with XLA
    
    // Implies must-compile semantics: either it will be compiled
    // with XLA, or an error will be thrown.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Dec 07 01:03:32 UTC 2021
    - 1.7K bytes
    - Viewed (0)
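    The excerpt describes an attribute with must-compile semantics. As a rough
    illustration of how a node gets tagged, the boolean attr can be set directly on
    a NodeDef; the string "_XlaMustCompile" used below is what kXlaMustCompileAttr
    is expected to name, but treat both the helper and the literal as assumptions
    rather than the library's API.

    #include "tensorflow/core/framework/attr_value.pb.h"
    #include "tensorflow/core/framework/node_def.pb.h"

    // Sketch: mark a graph node so it must be compiled with XLA (or fail).
    void MarkForXlaCompilation(tensorflow::NodeDef* node) {
      (*node->mutable_attr())["_XlaMustCompile"].set_b(true);
    }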
  7. tensorflow/compiler/jit/xla_cpu_device.cc

    limitations under the License.
    ==============================================================================*/
    
    // Registers the XLA_CPU device, which is an XlaDevice instantiation that runs
    // operators using XLA via the XLA "Host" (CPU) backend.
    
    #include <array>
    
    #include "absl/memory/memory.h"
    #include "tensorflow/compiler/jit/defs.h"
    #include "tensorflow/compiler/jit/flags.h"
    #include "tensorflow/compiler/jit/kernels/xla_ops.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 5.5K bytes
    - Viewed (0)
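    The excerpt explains that this file registers an "XLA_CPU" device backed by the
    XLA "Host" (CPU) backend. A bare-bones skeleton of that kind of registration is
    sketched below; the class name and the empty CreateDevices body are
    placeholders, while the real file constructs XlaDevice instances.

    #include <memory>
    #include <string>
    #include <vector>

    #include "tensorflow/core/common_runtime/device_factory.h"
    #include "tensorflow/core/platform/status.h"
    #include "tensorflow/core/public/session_options.h"

    namespace tensorflow {

    // Sketch only: a DeviceFactory that would expose an "XLA_CPU" device.
    class XlaCpuDeviceFactorySketch : public DeviceFactory {
     public:
      Status ListPhysicalDevices(std::vector<std::string>* devices) override {
        devices->push_back("/physical_device:XLA_CPU:0");
        return OkStatus();
      }
      Status CreateDevices(
          const SessionOptions& options, const std::string& name_prefix,
          std::vector<std::unique_ptr<Device>>* devices) override {
        // The real implementation creates an XlaDevice here; omitted in this
        // sketch.
        return OkStatus();
      }
    };

    REGISTER_LOCAL_DEVICE_FACTORY("XLA_CPU", XlaCpuDeviceFactorySketch);

    }  // namespace tensorflow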
  8. tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc

    #include "tensorflow/compiler/tf2xla/shape_util.h"
    #include "tensorflow/compiler/tf2xla/xla_op_registry.h"
    #include "xla/stream_executor/device_memory.h"
    #include "xla/stream_executor/platform_manager.h"
    #include "xla/stream_executor/stream.h"
    #include "xla/stream_executor/stream_executor.h"
    #include "tensorflow/core/framework/tensor_testutil.h"
    #include "tsl/lib/core/status_test_util.h"
    
    namespace tensorflow {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 7.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/tests/compile_mlir_util/add.mlir

    // RUN: tf-mlir-translate -mlir-tf-to-hlo-text %s -tf-input-shapes=: -tf-xla-emit-return-tuple | FileCheck %s
    // RUN: tf-mlir-translate -mlir-tf-to-hlo-text %s -tf-input-shapes=: -tf-xla-emit-use-tuple-args -tf-xla-emit-return-tuple | FileCheck -check-prefix=TUPLE-ARGS %s
    // RUN: tf-mlir-translate -mlir-tf-to-hlo-text %s -tf-input-shapes=: | FileCheck -check-prefix=NO_RET_TUPLE %s
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 23 18:56:13 UTC 2022
    - 2.7K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo_test.cc

    #include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
    #include "tensorflow/compiler/tf2xla/xla_compiler.h"
    #include "tensorflow/compiler/tf2xla/xla_helpers.h"
    #include "xla/client/client_library.h"
    #include "xla/shape.h"
    #include "xla/stream_executor/platform.h"
    #include "xla/stream_executor/platform_manager.h"
    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/framework/types.pb.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Apr 14 20:29:34 UTC 2024
    - 6K bytes
    - Viewed (0)