Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 181 - 190 of 245 for tpu0 (0.05 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops_test.cc

    #include "tensorflow/core/lib/monitoring/cell_reader.h"
    #include "tensorflow/core/platform/env.h"
    #include "tensorflow/core/platform/resource_loader.h"
    #include "tensorflow/core/platform/test.h"
    #include "tensorflow/core/tpu/tpu_defs.h"
    #include "tensorflow/core/util/debug_data_dumper.h"
    #include "tsl/framework/device_type.h"
    #include "tsl/lib/core/status_test_util.h"
    
    namespace tensorflow {
    namespace tfrt_compiler {
    namespace {
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:44:37 UTC 2024
    - 6.5K bytes
    - Viewed (0)
  2. tensorflow/cc/framework/while_gradients_test.cc

      CreateBackprop();
    
      Run<double>({1.0, 1.0}, {-2.0, 2.0});
      Run<double>({0.0, 0.0}, {-2.0, 2.0});
    }
    
    TEST_F(WhileGradientsTest, MultipleDevices) {
      // Make sure loop is created on cpu0
      scope_ = scope_.WithDevice("/cpu:0");
    
      // Create loop: while (i < 10) i += j
      Init(2);
      CreateLoop(
          [](const Scope& s, const std::vector<Output>& inputs, Output* output) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 27 20:32:17 UTC 2017
    - 7.7K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h

      // XLA-compiled. This is only needed if
      // - `upgrade_legacy` is true
      // - upgrading legacy features of the graph (which includes functionalization)
      //   runs before compilation cluster extraction (as for MLIR-based TPU bridge)
      // - session runtime is used (session runtime has issues with function names
      //   rewritten by functionalization).
      // Otherwise, this parameter should be set to false.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 02 04:56:10 UTC 2024
    - 6.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/tpu_parallel_execute_sink_resource_write.mlir

    // RUN: tf-opt %s -tf-tpu-parallel-execute-sink-resource-write | FILECHECK_OPTS="" FileCheck %s
    
    // CHECK-LABEL: func @multiple_uses
    // CHECK-SAME:  ({{.+}}: tensor<i1>, [[ARG1:%.+]]: tensor<!tf_type.resource>)
    func.func @multiple_uses(%arg0: tensor<i1>, %arg1: tensor<!tf_type.resource>) -> tensor<i1> {
      // CHECK:      [[PARALLEL_EXECUTE:%.+]]:2 = "tf_device.parallel_execute"
      %0:2 = "tf_device.parallel_execute"() ({
        tf_device.return %arg0 : tensor<i1>
      }, {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 28 12:06:33 UTC 2022
    - 7.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/BUILD

            "//tensorflow/compiler/mlir/quantization/...",
            "//tensorflow/compiler/mlir/tf2xla/transforms/...",
            "//tensorflow/lite/...",
            "//third_party/cloud_tpu/inference_converter/...",  # TPU Inference Converter V1
        ] + internal_visibility_allowlist(),
    )
    
    package(
        # copybara:uncomment default_applicable_licenses = ["@stablehlo//:license"],
        default_visibility = [
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 02:59:01 UTC 2024
    - 28.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/xla_platform_info.h

    // type.
    absl::StatusOr<DeviceType> GetCompilationDeviceType(
        const DeviceType& platform_device_type);
    
    // Builds a DeviceCompiler that uses xla::LocalClient using `platform_info` and
    // `compilation_device_type` (in non-TPU case) and sets *xla_device_compiler to
    // point to it. Uses flags from `MarkForCompilationPassFlags` for configuring
    // the persistor used in the DeviceCompiler. The platform ID from
    // `platform_info` must not be null in CPU case.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 7.2K bytes
    - Viewed (0)
  7. tensorflow/c/eager/c_api_test_util.h

                  TFE_TensorHandle* axis);
    
    // If there is a device of type `device_type`, returns true
    // and sets 'device_name' accordingly.
    // `device_type` must be either "GPU" or "TPU".
    bool GetDeviceName(TFE_Context* ctx, tensorflow::string* device_name,
                       const char* device_type);
    
    // Create a ServerDef with the given `job_name` and add `num_tasks` tasks in it.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jul 17 23:43:59 UTC 2023
    - 7.7K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.cc

      if (!is_calibration_required) return absl::OkStatus();
    
      // `duplicate_shape_determining_constants = false` because the
      // resulting graph of this step is not expected to be loaded on TPU.
      const ExportOptions export_opts = {
          /*duplicate_shape_determining_constants=*/false,
          /*unfreeze_constants=*/false, checkpoint_dir,
          /*debug_name=*/absl::StrCat(kName, kExportStepSuffix)};
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 9.2K bytes
    - Viewed (0)
  9. RELEASE.md

        `.predict` is available for Cloud TPUs, for all types of
        Keras models (sequential, functional and subclassing models).
        *   Automatic outside compilation is now enabled for Cloud TPUs. This allows
            `tf.summary` to be used more conveniently with Cloud TPUs.
        *   Dynamic batch sizes with DistributionStrategy and Keras are supported on
            Cloud TPUs.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 730.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.cc

                           const FunctionLibraryDefinition* function_library) {
      auto predicate = [](const Graph& graph) {
        for (const Node* node : graph.nodes()) {
      // _tpu_replicate is used in replicated TPU graphs. It will be converted
      // to _replication_info and _xla_compile_device_type in phase 1 pipelines.
          if (node->attrs().FindByString(std::string(kTpuReplicateAttr))) {
            return true;
          }
        }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 12:22:33 UTC 2024
    - 8.9K bytes
    - Viewed (0)
Back to top