Results 81 - 90 of 198 for _xla (0.05 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/xla_validate_inputs.mlir

    // RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-xla-validate-inputs
    
    // expected-error @+1 {{expects no nested calls of entry functions as they prevent graph traversal in some passes from working correctly}}
    func.func @nested_entry_functions() attributes {tf.entry_function = {}} {
      tf_executor.graph {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 19:29:14 UTC 2024
    - 818 bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/python/BUILD

            "@llvm-project//llvm:Support",
            "@llvm-project//mlir:FuncDialect",
            "@llvm-project//mlir:IR",
            "@llvm-project//mlir:Support",
            "@local_xla//xla/service:hlo_parser",
            "@local_xla//xla/service:hlo_proto_cc",
            "@local_xla//xla/translate/hlo_to_mhlo:hlo_to_mlir_hlo",
        ],
    )
    
    # Smaller version of flatbuffer_translate which only converts flatbuffer to MLIR.
    cc_library(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:23:49 UTC 2024
    - 9.4K bytes
    - Viewed (0)
  3. tensorflow/c/experimental/stream_executor/stream_executor.cc

    #include "tensorflow/c/tf_status_helper.h"
    #include "xla/stream_executor/executor_cache.h"
    #include "xla/stream_executor/host_memory_allocation.h"
    #include "xla/stream_executor/memory_allocation.h"
    #include "xla/stream_executor/platform.h"
    #include "xla/stream_executor/platform_manager.h"
    #include "xla/stream_executor/stream.h"
    #include "xla/stream_executor/stream_executor.h"
    #include "xla/stream_executor/stream_executor_common.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jun 14 07:39:19 UTC 2024
    - 27.1K bytes
    - Viewed (0)
  4. tensorflow/BUILD

            [],
            otherwise = [
                "@local_xla//xla/stream_executor/cuda:all_runtime",
                "@local_xla//xla/stream_executor/cuda:cuda_driver",
                "@local_xla//xla/stream_executor/cuda:cuda_platform",
                "@local_xla//xla/stream_executor/cuda:cudnn_plugin",
                "@local_xla//xla/stream_executor/cuda:cufft_plugin",
                "@local_xla//xla/stream_executor:cuda_platform",
            ],
        ),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 31 16:51:59 UTC 2024
    - 53.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/BUILD

            "@llvm-project//mlir:QuantOps",
            "@llvm-project//mlir:Support",
            "@llvm-project//mlir:TranslateLib",
            "@local_xla//xla/hlo/ir:hlo",
            "@local_xla//xla/service:hlo_module_config",
            "@local_xla//xla/service:hlo_proto_cc",
            "@local_xla//xla/translate/mhlo_to_hlo:type_to_shape",
            "@stablehlo//:stablehlo_ops",
        ],
        alwayslink = 1,
    )
    
    cc_library(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 22 21:28:13 UTC 2024
    - 47.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc

          llvm::cl::desc("Choose target opset."),
          llvm::cl::values(
              clEnumValN(OpSet::TF, "TF",
                         "Uses TF ops that mimic quantization behavior"),
              clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
              clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
                         "Uses TF Uniform Quantized ops"))};
    
      Option<int64_t> min_num_elements_for_weights_{
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

          llvm::cl::desc("Choose target opset."),
          llvm::cl::values(
              clEnumValN(OpSet::TF, "TF",
                         "Uses TF ops that mimic quantization behavior"),
              clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
              clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
                         "Uses TF Uniform Quantized ops"))};
    };
    
// Check if given indices in `val1` have the same number of elements as given
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.h

    CreateTPUClusterFormationPass(bool strict_clusters = false);
    
    // Creates a pass that extracts outside compilation (Host ops inside device
    // cluster) at head/tail of Device cluster to run before/after XLA computation.
    std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>>
    CreateExtractHeadTailOutsideCompilationPass();
    
// Creates a pass that extracts outside compilation (Host ops inside device
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 02:01:13 UTC 2024
    - 3.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/BUILD

            "@local_tsl//tsl/platform:error_logging",
            "@local_tsl//tsl/platform:errors",
            "@local_xla//xla:xla_data_proto_cc",
            "@local_xla//xla:xla_proto_cc",
            "@local_xla//xla/mlir_hlo",
        ],
    )
    
    cc_library(
        name = "tpu_variable_runtime_reformatting",
        srcs = ["tpu_variable_runtime_reformatting.cc"],
        hdrs = [
            "runtime_passes.h",
        ],
        deps = [
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10K bytes
    - Viewed (0)
  10. .bazelrc

    #     --config=dbg --per_file_copt=+tensorflow/core/kernels/identity_op.*@-g
    # Since this .bazelrc file is synced between the tensorflow/tensorflow repo and
    # the openxla/xla repo, also include debug info for files under xla/.
    build:dbg --per_file_copt=+.*,-tensorflow.*,-xla.*@-g0
    build:dbg --per_file_copt=+tensorflow/core/kernels.*@-g0
    # for now, disable arm_neon. see: https://github.com/tensorflow/tensorflow/issues/33360
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 17:12:54 UTC 2024
    - 52.9K bytes
    - Viewed (0)