Results 61 - 70 of 93 for "created" (0.12 sec)

  1. tensorflow/compiler/jit/xla_cpu_device.cc

      // context in tensorflow_accelerator_device_info(). Also,
      // tensorflow_accelerator_device_info() == nullptr is used as an IsCPU test.
      // We need XlaCpuDevice to be treated not as CPU because it allocates
      // XlaTensors, not regular Tensors.
      Status status = device->UseAcceleratorDeviceInfo();
      if (!status.ok()) {
        errors::AppendToMessage(&status, "while setting up ", DEVICE_GPU_XLA_JIT);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 5.5K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_stablehlo_custom_call_to_composite.cc

          return op->emitError("expected exactly one called_computation");
    
        auto decomposition = mlir::cast<FlatSymbolRefAttr>(calledComputations[0]);
    
        auto composite = rewriter.create<mlir::stablehlo::CompositeOp>(
            op.getLoc(), op.getResultTypes(), op.getOperands(), name.str(), attrs,
            decomposition.getValue());
        rewriter.replaceOp(op, composite.getResults());
        return success();
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 4.4K bytes
    - Viewed (0)
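
    The snippet above is the tail end of an MLIR rewrite pattern: it checks that the custom call names exactly one called_computation, builds a stablehlo::CompositeOp pointing at that decomposition, and swaps it in with rewriter.replaceOp. For orientation, a minimal skeleton of the kind of pattern this code sits in is sketched below; the class name is illustrative and the header paths are assumptions, not the actual TensorFlow sources.

      #include "mlir/IR/PatternMatch.h"
      #include "stablehlo/dialect/StablehloOps.h"

      // Illustrative skeleton only; the real pattern lives in the file above.
      struct RewriteCustomCallToComposite
          : public mlir::OpRewritePattern<mlir::stablehlo::CustomCallOp> {
        using OpRewritePattern::OpRewritePattern;

        mlir::LogicalResult
        matchAndRewrite(mlir::stablehlo::CustomCallOp op,
                        mlir::PatternRewriter &rewriter) const override {
          // 1. Check that called_computations has exactly one entry.
          // 2. Build the stablehlo::CompositeOp with rewriter.create<>, as in
          //    the excerpt, then:
          //      rewriter.replaceOp(op, composite.getResults());
          //      return mlir::success();
          return mlir::failure();  // placeholder so the skeleton compiles
        }
      };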
  3. tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc

      auto* ctx = func.getContext();
      patterns.add<PushDownDequantize>(ctx);
      if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) {
        signalPassFailure();
      }
    }
    }  // namespace
    
    // Creates an instance of the TensorFlow Lite optimize op order pass.
    std::unique_ptr<OperationPass<func::FuncOp>> CreateOptimizeOpOrderPass() {
      return std::make_unique<OptimizeOpOrderPass>();
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.1K bytes
    - Viewed (0)
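
    CreateOptimizeOpOrderPass returns a function-level pass, so it is normally scheduled on a pass manager nested on func::FuncOp. A minimal usage sketch follows; the TFL namespace and the surrounding setup are assumptions made for illustration.

      // Schedule the op-order pass on every func.func in the module.
      mlir::PassManager pm(&context);
      pm.addNestedPass<mlir::func::FuncOp>(mlir::TFL::CreateOptimizeOpOrderPass());
      if (mlir::failed(pm.run(module))) {
        module.emitError("optimize-op-order pipeline failed");
      }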
  4. tensorflow/compiler/mlir/lite/metrics/error_collector_inst.cc

      common_error_message_.clear();
      error_collector_->Clear();
    }
    
    void ErrorCollectorInstrumentation::runAfterPassFailed(Pass *pass,
                                                           Operation *module) {
      // Create a new error if no errors collected yet.
      if (error_collector_->CollectedErrors().empty() &&
          !common_error_message_.empty()) {
        error_collector_->ReportError(NewConverterErrorData(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 25 01:48:36 UTC 2024
    - 5.3K bytes
    - Viewed (0)
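
    runAfterPassFailed is one of the hooks exposed by MLIR's PassInstrumentation interface; an instrumentation such as the error collector above is attached to a pass manager before the pipeline runs and is called back when a pass fails. A minimal sketch of the general mechanism, using an illustrative class rather than the TFLite error collector:

      #include "mlir/Pass/Pass.h"
      #include "mlir/Pass/PassInstrumentation.h"
      #include "mlir/Pass/PassManager.h"

      // Illustrative instrumentation: report the name of any pass that fails.
      class FailureLogger : public mlir::PassInstrumentation {
       public:
        void runAfterPassFailed(mlir::Pass *pass, mlir::Operation *op) override {
          op->emitWarning() << "pass failed: " << pass->getName();
        }
      };

      // Attached before running the pipeline:
      //   pm.addInstrumentation(std::make_unique<FailureLogger>());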
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/tflite_legalize_hlo.cc

      patterns->add<odml::ConvertCustomCallOp>(context);
      populateWithGenerated(*patterns);
    
      patterns->add<ConvertReduceOpToTFLiteArgmin, ConvertReduceOpToTFLiteArgmax>(
          context);
    }
    
    // Creates an instance of the pass.
    std::unique_ptr<OperationPass<ModuleOp>> CreateLegalizeHloToTfLitePass() {
      return std::make_unique<LegalizeHloToTfLitePass>();
    }
    
    // Registers the pass implementation
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.2K bytes
    - Viewed (0)
  6. tensorflow/c/kernels/histogram_summary_op.cc

    #include "tensorflow/core/platform/macros.h"
    #include "tensorflow/core/platform/protobuf.h"
    #include "tensorflow/core/platform/tstring.h"
    #include "tensorflow/core/platform/types.h"
    
    namespace {
    
    // Operators used to create a std::unique_ptr for TF_Tensor and TF_Status.
    struct TFTensorDeleter {
      void operator()(TF_Tensor* tf_tensor) const { TF_DeleteTensor(tf_tensor); }
    };
    
    struct TFStatusDeleter {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 6.5K bytes
    - Viewed (0)
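
    The deleter structs above exist so TF_Tensor and TF_Status handles from the TensorFlow C API can be owned by std::unique_ptr instead of being freed by hand. A short self-contained sketch of that idiom; the alias and function names are illustrative, and the status deleter body is assumed symmetric with the tensor one shown in the excerpt.

      #include <memory>

      #include "tensorflow/c/tf_status.h"
      #include "tensorflow/c/tf_tensor.h"

      // Same shape as the deleters in the excerpt.
      struct TFTensorDeleter {
        void operator()(TF_Tensor* t) const { TF_DeleteTensor(t); }
      };
      struct TFStatusDeleter {
        void operator()(TF_Status* s) const { TF_DeleteStatus(s); }
      };

      using SafeTensorPtr = std::unique_ptr<TF_Tensor, TFTensorDeleter>;
      using SafeStatusPtr = std::unique_ptr<TF_Status, TFStatusDeleter>;

      void Example() {
        // TF_DeleteStatus runs automatically when `status` leaves scope,
        // even on early returns.
        SafeStatusPtr status(TF_NewStatus());
      }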
  7. tensorflow/compiler/mlir/tensorflow/transforms/freeze_global_tensors.cc

            }
          }
    
          // Replace the arg with a tf.Const op in the function body.
          builder.setInsertionPointToStart(&func.getBody().front());
          auto const_op = builder.create<TF::ConstOp>(global_tensor.getLoc(),
                                                      *global_tensor.getValue());
          args_to_erase.set(val.getArgNumber());
          for (auto read_op : read_variable_ops_to_erase) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.8K bytes
    - Viewed (0)
  8. tensorflow/cc/framework/while_gradients.cc

      return strings::StrCat(forward_frame_name, "_backprop");
    }
    
    // Creates a loop that counts the number of iterations performed by the
    // while loop associated with `while_ctx`. The returned output yields the
    // iteration count.
    Status AddForwardLoopCounter(WhileContext* while_ctx, const Scope& scope,
                                 Output* count) {
      // Create while loop:
      //   i = 0
      //   while forward loop predicate is true:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 05:57:22 UTC 2024
    - 8.1K bytes
    - Viewed (0)
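
    The pseudocode comment above describes a counter loop whose body adds one to a single loop variable on every iteration. A hedged sketch of what such a body builder can look like with the C++ ops API follows; the function name and wiring are assumptions for illustration, not the actual while_gradients.cc code.

      #include <vector>

      #include "tensorflow/cc/framework/scope.h"
      #include "tensorflow/cc/ops/standard_ops.h"

      // Illustrative body builder: outputs[0] = inputs[0] + 1 each iteration.
      tensorflow::Status CounterBody(const tensorflow::Scope& scope,
                                     const std::vector<tensorflow::Output>& inputs,
                                     std::vector<tensorflow::Output>* outputs) {
        outputs->push_back(tensorflow::ops::Add(
            scope, inputs[0], tensorflow::ops::Const(scope, 1)));
        return scope.status();
      }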
  9. tensorflow/compiler/mlir/lite/transforms/quantize_variables.cc

          // Add dequantize.
          builder.setInsertionPointAfter(read_variable_op);
          auto new_read_variable_op =
              builder.create<ReadVariableOp>(read_variable_op.getLoc(), ref_qtype,
                                             read_variable_op.getResourceId());
          auto new_dq_op = builder.create<DequantizeOp>(
              read_variable_op.getLoc(), read_variable_op.getResult().getType(),
              new_read_variable_op.getResult());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/transforms/constant_fold_utils.cc

      // available during compilation or compilation only device for on demand
      // execution which may create a recursion if used for constant folding.
      std::string host_cpu = tensorflow::DeviceNameUtils::FullName(
          /*job=*/"localhost", /*replica=*/0, /*task=*/0, /*type=*/"CPU", /*id=*/0);
    
      absl::StatusOr<OpKernelRunner> runner = OpKernelRunner::Create(
          node_def->get()->op(), node_def->get()->name(), host_cpu, operands.size(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.3K bytes
    - Viewed (0)
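
    DeviceNameUtils::FullName assembles the canonical TensorFlow device string from its parts, so the call in the excerpt pins constant folding to the host CPU. For the arguments shown, the resulting name is:

      "/job:localhost/replica:0/task:0/device:CPU:0"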