Results 1 - 7 of 7 for modulemap (0.18 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/passes.h

    // their region based counterparts.
    std::unique_ptr<OperationPass<ModuleOp>>
    CreateTFFunctionalControlFlowToRegions();
    std::unique_ptr<OperationPass<ModuleOp>> CreateTFFunctionalControlFlowToRegions(
        bool allow_passthrough_args);
    
    // Transforms region-based control flow operations in the TensorFlow dialect to
    // their functional counterparts.
    std::unique_ptr<OperationPass<ModuleOp>>
    CreateTFRegionControlFlowToFunctional();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 31.8K bytes
    - Viewed (0)
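
    A minimal sketch of how these factory functions are typically wired into a pass
    pipeline (assumptions: a ModuleOp already loaded into an MLIRContext, and the
    mlir::TF namespace used by this header):

        #include "mlir/Pass/PassManager.h"
        #include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"

        // Converts functional If/While ops to their region-based forms and back.
        mlir::LogicalResult RoundTripControlFlow(mlir::ModuleOp module) {
          mlir::PassManager pm(module.getContext());
          pm.addPass(mlir::TF::CreateTFFunctionalControlFlowToRegions());
          pm.addPass(mlir::TF::CreateTFRegionControlFlowToFunctional());
          return pm.run(module);
        }
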
  2. tensorflow/compiler/mlir/tf2xla/internal/passes/xla_broadcast.cc

          }
        }
        return WalkResult::advance();
      });
    
      return success();
    }
    
    void XlaBroadcast::runOnOperation() {
      FuncOp func = getOperation();
      mlir::ModuleOp module = func->getParentOfType<mlir::ModuleOp>();
      if (!module) return signalPassFailure();
      func.walk([&](ClusterOp cluster) {
        if (auto replicate = cluster->getParentOfType<ReplicateOp>()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 13 18:52:07 UTC 2024
    - 13.9K bytes
    - Viewed (0)
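
    A minimal, self-contained sketch of the walk-and-interrupt idiom used in the
    snippet above (the unregistered-op check is only an example condition):

        #include "mlir/Dialect/Func/IR/FuncOps.h"
        #include "mlir/IR/BuiltinOps.h"

        mlir::LogicalResult CheckOps(mlir::func::FuncOp func) {
          // Like the snippet, recover the enclosing module before doing any work.
          mlir::ModuleOp module = func->getParentOfType<mlir::ModuleOp>();
          if (!module) return mlir::failure();

          // walk() visits every nested operation; interrupt() stops the traversal.
          mlir::WalkResult result = func.walk([&](mlir::Operation *op) {
            // Example condition: stop as soon as an unregistered op is found.
            if (!op->getDialect()) return mlir::WalkResult::interrupt();
            return mlir::WalkResult::advance();
          });
          return result.wasInterrupted() ? mlir::failure() : mlir::success();
        }
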
  3. tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.cc

                                             std::string* tpu0_device) {
      // Fetch the TPU devices.
      mlir::ModuleOp moduleOp = op->getParentOfType<mlir::ModuleOp>();
      mlir::TF::RuntimeDevices devices;
      if (failed(tensorflow::GetDevicesFromOp(moduleOp, &devices)))
        return moduleOp.emitOpError() << "No available devices.";
      llvm::ArrayRef<tensorflow::DeviceNameUtils::ParsedName> device_names =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 10 20:10:40 UTC 2024
    - 32.8K bytes
    - Viewed (0)
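
    A minimal sketch of inspecting one of the parsed device names this helper
    iterates over (the device string in the comment is only an example):

        #include <string>

        #include "tensorflow/core/util/device_name_utils.h"

        // Returns true if a fully specified device name refers to a TPU device,
        // e.g. "/job:worker/replica:0/task:0/device:TPU:0".
        bool IsTpuDevice(const std::string& full_name) {
          tensorflow::DeviceNameUtils::ParsedName parsed;
          if (!tensorflow::DeviceNameUtils::ParseFullName(full_name, &parsed))
            return false;
          return parsed.has_type && parsed.type == "TPU";
        }
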
  4. tensorflow/compiler/mlir/tensorflow/transforms/decompose_optionals.cc

          } else {
            rewriter.cancelOpModification(caller);
          }
        }
        return success();
      }
    };
    
    void DecomposeOptionalsPass::runOnOperation() {
      mlir::ModuleOp module = getOperation();
    
      RewritePatternSet pattern_list(&getContext());
      pattern_list.add<HandleOptionalFrom>(&getContext());
      pattern_list.add<HandleOptionalGet>(&getContext());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 9.8K bytes
    - Viewed (0)
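
    A minimal sketch of how a pattern set like the one above is typically driven
    to a fixed point once all patterns have been registered:

        #include "mlir/Transforms/GreedyPatternRewriteDriver.h"

        mlir::LogicalResult ApplyPatterns(mlir::ModuleOp module,
                                          mlir::RewritePatternSet patterns) {
          // Repeatedly applies the patterns until no more rewrites fire.
          mlir::FrozenRewritePatternSet frozen(std::move(patterns));
          return mlir::applyPatternsAndFoldGreedily(module.getOperation(), frozen);
        }
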
  5. tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc

      return {ref.data(), ref.size()};
    }
    
    // Dumps the MLIR module to disk.
    // This requires TF_DUMP_GRAPH_PREFIX to be set to a path that exists (or can
    // be created).
    static void DumpModule(mlir::ModuleOp module, std::string file_prefix) {
      std::string prefix = GetDumpDirFromEnvVar();
      if (prefix.empty()) return;
    
      auto* env = tensorflow::Env::Default();
      auto status = env->RecursivelyCreateDir(prefix);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 22:19:26 UTC 2024
    - 18.5K bytes
    - Viewed (0)
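
    A minimal sketch of enabling these dumps from inside a test or tool (the
    directory path is only an example):

        #include <cstdlib>

        // DumpModule is a no-op unless TF_DUMP_GRAPH_PREFIX points at a
        // directory that exists or can be created.
        void EnableMlirDumps() {
          setenv("TF_DUMP_GRAPH_PREFIX", "/tmp/mlir_dumps", /*overwrite=*/1);
        }
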
  6. platforms/core-configuration/configuration-cache/src/integTest/groovy/org/gradle/internal/cc/impl/isolated/IsolatedProjectsToolingApiIdeaProjectIntegrationTest.groovy

            originalResult.rootIdeaProject.name == 'buildA'
            originalResult.rootIdeaProject.modules.name == ['buildA']
    
            def moduleA = originalResult.rootIdeaProject.modules[0]
            moduleA.dependencies.each {
                assert it instanceof IdeaModuleDependency
            }
            moduleA.dependencies.targetModuleName == ['buildB-b1', 'buildA-buildC', 'buildD-b1']
    
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Sat Jun 08 11:29:25 UTC 2024
    - 20.9K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.cc

    // Transforms the given module to be suitable for export to TensorFlow GraphDef
    // and then exports all functions to the given library.
    Status PrepareAndExportToLibrary(mlir::ModuleOp module,
                                     FunctionLibraryDefinition* flib_def) {
      // Pass pipeline is defined here instead of leveraging the phase one export
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 22:19:26 UTC 2024
    - 14K bytes
    - Viewed (0)
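
    A hypothetical usage sketch: building an empty function library and handing it
    to an export helper with the signature shown above (PrepareAndExportToLibrary
    is internal to this file, so the call is illustrative only):

        #include "tensorflow/core/framework/function.h"
        #include "tensorflow/core/framework/op.h"

        tensorflow::Status ExportModule(mlir::ModuleOp module) {
          tensorflow::FunctionLibraryDefinition flib_def(
              tensorflow::OpRegistry::Global(), tensorflow::FunctionDefLibrary());
          // Illustrative call mirroring the signature shown in the snippet.
          return PrepareAndExportToLibrary(module, &flib_def);
        }
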