Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 26 for TF_RET_CHECK (0.2 sec)

  1. tensorflow/compiler/jit/xla_tpu_device.cc

        se::Stream* const src_compute_stream = src_xla_context->stream();
        TF_RET_CHECK(src_compute_stream != nullptr);
        TF_RET_CHECK(input->dtype() == output->dtype())
            << "input type: " << DataTypeString(input->dtype()) << " output type "
            << DataTypeString(output->dtype());
        TF_RET_CHECK(input->shape() == output->shape());
        TF_RET_CHECK(DMAHelper::CanUseDMA(input));
        auto* const src_compute_stream_impl =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 22:53:47 UTC 2024
    - 20.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/extract_outside_compilation_pass.cc

        Graph* g, Node* n, FunctionLibraryDefinition* fld) {
      TF_RET_CHECK(n->IsWhileNode());
    
      // Check if there is any lifted args in body function.
      NameAttrList body_func;
      TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "body", &body_func));
      const FunctionDef* body_function_def = fld->Find(body_func.name());
      TF_RET_CHECK(body_function_def);
    
      if (!HasLiftedArgs(*body_function_def)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 104.7K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.cc

                                 mlir::MLIRContext* mlir_context,
                                 mlir::OwningOpRef<mlir::ModuleOp>* mlir_module) {
      TF_RET_CHECK(!serialized_mlir_module.empty())
          << "unexpected empty serialized MLIR module string";
      TF_RET_CHECK(mlir_module) << "unexpected null MLIR module pointer";
    
      // Make sure we catch any error reported by MLIR and forward it to the TF
      // error reporting system.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Feb 26 03:47:51 UTC 2024
    - 2.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/xla_device_compiler_client.cc

          options, result, client_->default_device_ordinal());
      TF_ASSIGN_OR_RETURN(
          auto executables,
          client_->Compile(*result.computation, argument_layouts, build_options));
      TF_RET_CHECK(executables.size() == 1);
      return std::move(executables[0]);
    }
    
    absl::StatusOr<std::string> XlaDeviceCompilerClient::SerializeExecutable(
        const xla::LocalExecutable& executable) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 4.1K bytes
    - Viewed (0)
  5. tensorflow/compiler/jit/xla_launch_util.cc

        TF_RETURN_IF_ERROR(
            ctx->allocate_temp(output_dtype, output_shape, &output_tensor));
        if (output_tensor.TotalBytes() > 0) {
          XlaTensor* xla_tensor = XlaTensor::FromTensor(&output_tensor);
          TF_RET_CHECK(xla_tensor);
          xla_tensor->set_shaped_buffer(output.TakeSubTree({output_num}));
          if (use_multiple_streams) {
            xla_tensor->ResetDefinitionEvent(definition_event, stream);
          }
        }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 40.4K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/get_compiler_ir.cc

          // Handles tf.resource variables.
          TF_RET_CHECK(input->dtype() == DT_RESOURCE);
          const VariableInfo& variable = *variable_info_lookup[input_num];
          arg.kind = XlaCompiler::Argument::kResource;
          arg.resource_kind = XlaResource::kVariable;
          arg.definition_stack_trace = variable.definition_stack_trace();
          TF_RET_CHECK(variable.var() && variable.var()->is_initialized);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 06:59:07 UTC 2024
    - 19K bytes
    - Viewed (0)
  7. tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc

          if (le->IsControlEdge()) {
            control_outputs.insert(le->dst());
          } else {
            TF_RET_CHECK(le->src_output() < num_outputs);
            Node* output_node = le->dst();
    
            if (add_edges_to_output_of_downstream_nodes) {
              TF_RET_CHECK(output_node->type_string() == kXlaClusterOutput)
                  << le->DebugString();
              nodes_to_remove.push_back(output_node);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 12 06:33:33 UTC 2024
    - 15.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/translate/export_graphdef.cc

      auto dict_attr =
          function->getAttrOfType<mlir::DictionaryAttr>(kEntryFuncAttr);
      if (dict_attr) {
        TF_RET_CHECK(mlir::isa<mlir::StringAttr>(dict_attr.get("inputs")))
            << "inputs missing in entry function attribute";
        TF_RET_CHECK(mlir::isa<mlir::StringAttr>(dict_attr.get("outputs")))
            << "outputs missing in entry function attribute";
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 35.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.cc

      auto dict_attr =
          function->getAttrOfType<mlir::DictionaryAttr>(kEntryFuncAttr);
      if (dict_attr) {
        TF_RET_CHECK(mlir::isa<mlir::StringAttr>(dict_attr.get("inputs")))
            << "inputs missing in entry function attribute";
        TF_RET_CHECK(mlir::isa<mlir::StringAttr>(dict_attr.get("outputs")))
            << "outputs missing in entry function attribute";
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 23:04:51 UTC 2024
    - 35.2K bytes
    - Viewed (0)
  10. tensorflow/compiler/jit/test_util.cc

        const Graph& graph, const GraphShapeInfo& shape_info,
        std::map<string, std::vector<PartialTensorShape>> expected_shapes) {
      for (Node* node : graph.op_nodes()) {
        auto sit = shape_info.find(node->name());
        TF_RET_CHECK(sit != shape_info.end())
            << "Missing shape information for node " << node->name();
        std::vector<PartialTensorShape> shapes;
        for (const auto& output : sit->second) shapes.push_back(output.shape);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Feb 09 11:36:41 UTC 2024
    - 3.7K bytes
    - Viewed (0)
Back to top