Results 51 - 60 of 93 for created (0.07 sec)

  1. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc

        if (constant.getType().getRank() != 2) return failure();
    
        // Create a tfl.transpose op that performs ZX transpose on `input`.
        auto create_z_x_transpose_op = [&](Value input) -> Value {
          RankedTensorType input_type =
              mlir::cast<RankedTensorType>(input.getType());
          const int input_rank = input_type.getRank();
    
          // Create a 1D I32 tensor for representing the dimension permutation.

    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.6K bytes - Viewed (0)
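The excerpt above builds a tfl.transpose whose permutation swaps the last two ("ZX") dimensions of `input`. A minimal sketch of such a permutation vector (the helper name and the exact permutation layout are assumptions for illustration, not taken from the indexed file):

      #include <cstdint>
      #include <numeric>
      #include <utility>

      #include "llvm/ADT/SmallVector.h"

      // Identity permutation of length `rank` with the last two axes swapped,
      // e.g. {0, 1, 3, 2} for rank 4. Assumes rank >= 2; illustrative only.
      llvm::SmallVector<int32_t> LastTwoAxesSwapped(int rank) {
        llvm::SmallVector<int32_t> permutation(rank);
        std::iota(permutation.begin(), permutation.end(), 0);
        std::swap(permutation[rank - 1], permutation[rank - 2]);
        return permutation;
      }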
  2. tensorflow/compiler/mlir/tf2xla/transforms/utils.cc

    ConstantOp GetScalarConstOfType(Type ty, Location loc, int64_t raw_value,
                                    OpBuilder* builder) {
      return builder->create<ConstantOp>(loc, hlo::getScalarOfType(ty, raw_value));
    }
    
    ConstantOp GetScalarNegZeroOfType(Type ty, Location loc, OpBuilder* builder) {
      return builder->create<ConstantOp>(loc, hlo::getScalarNegZeroOfType(ty));
    }
    
    DenseIntElementsAttr GetI64ElementsAttr(ArrayAttr attr) {
      RankedTensorType ty =

    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 1.8K bytes - Viewed (0)
  3. tensorflow/compiler/jit/xla_compile_util.cc

      // _Arg nodes, and let CompileGraph walk it. This could be optimized.
      std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
    
      // First create the actual node we care about computing.
      TF_ASSIGN_OR_RETURN(Node * main_node, graph->AddNode(node_def));
    
      // Create dummy _Arg nodes. Link these to `node` and also via a control
      // dependency edge to the _SOURCE node.
      for (int64_t i = 0, end = args.size(); i < end; ++i) {

    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 4.6K bytes - Viewed (0)
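The excerpt above wraps a single NodeDef in a fresh Graph before compilation. A self-contained sketch of the same steps (the function name and StatusOr return type are assumptions for illustration, not part of the indexed file):

      #include <memory>

      #include "tensorflow/core/framework/node_def.pb.h"
      #include "tensorflow/core/framework/op.h"
      #include "tensorflow/core/graph/graph.h"
      #include "tensorflow/core/platform/statusor.h"

      // Builds a single-node Graph from `node_def` and anchors the node to the
      // _SOURCE node with a control edge, mirroring the excerpt above.
      tensorflow::StatusOr<std::unique_ptr<tensorflow::Graph>>
      BuildSingleNodeGraph(const tensorflow::NodeDef& node_def) {
        auto graph = std::make_unique<tensorflow::Graph>(
            tensorflow::OpRegistry::Global());
        TF_ASSIGN_OR_RETURN(tensorflow::Node* main_node,
                            graph->AddNode(node_def));
        graph->AddControlEdge(graph->source_node(), main_node);
        return graph;
      }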
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/nchw_convolution_to_nhwc.cc

        Value input = op->getOperand(0);
        const TensorType new_input_tensor_type = GetTransposedTensorType(
            mlir::cast<TensorType>(input.getType()), kNchwToNhwcPermutation);
    
        auto input_transpose_op = rewriter.create<mlir::stablehlo::TransposeOp>(
            op.getLoc(), /*resultType0=*/new_input_tensor_type, /*operand=*/input,
            rewriter.getDenseI64ArrayAttr(kNchwToNhwcPermutation));
    
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.2K bytes - Viewed (0)
  5. tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc

          if (QuantizedType::getQuantizedElementType(operand.getType())) {
            auto newTy = QuantizedType::castToExpressedType(operand.getType());
            newOperands.push_back(
                rewriter.create<TFL::DequantizeOp>(loc, newTy, operand));
            continue;
          }
    
          newOperands.push_back(operand);
        }
    
        SmallVector<Type> newResultTys;
        for (auto result : op->getResults()) {

    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.8K bytes - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/lower_globals_to_ml_program.cc

        if (globalTensor.getValue()) {
          initial_value = *globalTensor.getValue();
        } else {
          initial_value = mlir::Attribute();
        }
        opToName[globalTensor] = name;
        auto variableOp = globalBuilder.create<ml_program::GlobalOp>(
            globalTensor.getLoc(), name, globalTensor.getType(),
            globalTensor.getIsMutable(), initial_value,
            /*visibility=*/globalBuilder.getStringAttr("private"));

    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.6K bytes - Viewed (0)
  7. tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.cc

        b.setInsertionPoint(op);
        if (auto dq = llvm::dyn_cast<DequantizeOp>(op)) {
          auto dcast = b.create<quantfork::DequantizeCastOp>(
              dq.getLoc(), dq.getOutput().getType(), dq.getInput());
          dq.getOutput().replaceAllUsesWith(dcast);
          dq.erase();
        } else if (auto q = llvm::dyn_cast<QuantizeOp>(op)) {
          auto qcast = b.create<quantfork::QuantizeCastOp>(
              q.getLoc(), q.getOutput().getType(), q.getInput());

    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 02:50:01 UTC 2024 - 3.5K bytes - Viewed (0)
  8. tensorflow/compiler/mlir/lite/debug/debug_test.cc

              return registry;
            }()) {
        context_.loadAllAvailableDialects();
    
        mlir::OpBuilder builder(&context_);
        module_ = builder.create<mlir::ModuleOp>(builder.getUnknownLoc());
    
        builder.setInsertionPointToStart(module_->getBody());
        auto func = builder.create<mlir::func::FuncOp>(  //
            builder.getUnknownLoc(), "main", builder.getFunctionType({}, {}));
        func->setAttr("tfl.func", builder.getUnitAttr());

    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 11:15:16 UTC 2024 - 9.7K bytes - Viewed (0)
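The test fixture above creates an empty module and a "main" FuncOp with an OpBuilder. A standalone sketch of that pattern against upstream MLIR (the main() driver and the trailing func.return are assumptions added so the module has a complete body, not part of the indexed file):

      #include "llvm/Support/raw_ostream.h"
      #include "mlir/Dialect/Func/IR/FuncOps.h"
      #include "mlir/IR/Builders.h"
      #include "mlir/IR/BuiltinOps.h"
      #include "mlir/IR/MLIRContext.h"
      #include "mlir/IR/OwningOpRef.h"

      int main() {
        mlir::MLIRContext context;
        context.loadDialect<mlir::func::FuncDialect>();

        mlir::OpBuilder builder(&context);
        // Empty module at an unknown location, as in the fixture above.
        mlir::OwningOpRef<mlir::ModuleOp> module =
            builder.create<mlir::ModuleOp>(builder.getUnknownLoc());

        // An empty "main" function with no arguments or results.
        builder.setInsertionPointToStart(module->getBody());
        auto func = builder.create<mlir::func::FuncOp>(
            builder.getUnknownLoc(), "main", builder.getFunctionType({}, {}));

        // Give it a body containing only a return.
        builder.setInsertionPointToStart(func.addEntryBlock());
        builder.create<mlir::func::ReturnOp>(builder.getUnknownLoc());

        module->print(llvm::outs());
        return 0;
      }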
  9. tensorflow/compiler/mlir/tfrt/tests/saved_model/saved_model_test.cc

      auto runtime =
          tensorflow::tfrt_stub::Runtime::Create(/*num_inter_op_threads=*/1);
      tfrt_stub::GraphExecutionOptions options(runtime.get());
    
      options.compile_options.device_target = TfrtDeviceInfraTarget::kGpu;
    
      TF_ASSERT_OK_AND_ASSIGN(
          std::unique_ptr<tfrt_stub::FallbackState> fallback_state,
          tfrt_stub::FallbackState::Create(SessionOptions(), FunctionDefLibrary()));
    
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Oct 13 01:17:29 UTC 2023 - 9K bytes - Viewed (0)
  10. tensorflow/compiler/mlir/lite/stablehlo/odml_converter/folders.cc

    namespace {
    
    // Helper class for parsing operands to a foldable operation.
    class FoldAdaptor {
     public:
      // Returns std::nullopt if the operation cannot be folded.
      static std::optional<FoldAdaptor> Create(Operation* operation) {
        auto foldable_opr = [](Value val) -> bool {
          return !llvm::isa<BlockArgument>(val) &&
                 llvm::isa<stablehlo::ConstantOp>(val.getDefiningOp());
        };

    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 06:11:55 UTC 2024 - 4.5K bytes - Viewed (0)
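A predicate like foldable_opr above is typically checked across every operand before attempting a fold. A minimal sketch (the AllOperandsFoldable wrapper is an assumption for illustration, not part of the indexed file):

      #include "llvm/ADT/STLExtras.h"
      #include "mlir/IR/Operation.h"
      #include "mlir/IR/Value.h"
      #include "stablehlo/dialect/StablehloOps.h"

      // True only when every operand is a non-block-argument value defined by a
      // stablehlo.constant, mirroring the lambda in the excerpt above.
      bool AllOperandsFoldable(mlir::Operation* operation) {
        return llvm::all_of(operation->getOperands(), [](mlir::Value val) {
          return !llvm::isa<mlir::BlockArgument>(val) &&
                 llvm::isa<mlir::stablehlo::ConstantOp>(val.getDefiningOp());
        });
      }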