Results 41 - 50 of 80 for "created" (0.21 sec)

  1. tensorflow/cc/framework/while_gradients.cc

      return strings::StrCat(forward_frame_name, "_backprop");
    }
    
    // Creates a loop that counts the number of iterations performed by the
    // while loop associated with `while_ctx`. The returned output yields the
    // iteration count.
    Status AddForwardLoopCounter(WhileContext* while_ctx, const Scope& scope,
                                 Output* count) {
      // Create while loop:
      //   i = 0
      //   while forward loop predicate is true:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 05:57:22 UTC 2024
    - 8.1K bytes
    - Viewed (0)
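
The AddForwardLoopCounter excerpt from while_gradients.cc builds a companion loop that simply counts how many iterations the forward while loop executes, so the gradient computation knows how many steps to run backwards. The sketch below restates that counting structure in plain C++ rather than TensorFlow's graph-building API; CountForwardIterations and the callables passed to it are illustrative names, not part of the TensorFlow code.

    #include <functional>
    #include <iostream>
    
    // Mirrors the commented pseudocode in while_gradients.cc:
    //   i = 0
    //   while forward loop predicate is true:
    //     i += 1
    // and returns the iteration count, analogous to the `count` output
    // produced by AddForwardLoopCounter.
    int CountForwardIterations(const std::function<bool()>& pred,
                               const std::function<void()>& body) {
      int i = 0;
      while (pred()) {
        body();
        ++i;
      }
      return i;
    }
    
    int main() {
      int x = 0;
      int count = CountForwardIterations([&] { return x < 5; }, [&] { ++x; });
      std::cout << "iterations: " << count << "\n";  // prints 5
    }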
  2. tensorflow/compiler/mlir/lite/transforms/quantize_variables.cc

          // Add dequantize.
          builder.setInsertionPointAfter(read_variable_op);
          auto new_read_variable_op =
              builder.create<ReadVariableOp>(read_variable_op.getLoc(), ref_qtype,
                                             read_variable_op.getResourceId());
          auto new_dq_op = builder.create<DequantizeOp>(
              read_variable_op.getLoc(), read_variable_op.getResult().getType(),
              new_read_variable_op.getResult());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc

        if (failed(applyPatternsAndFoldGreedily(func, frozen_patterns))) {
          func.emitError() << "quant-propagate-quantize-type failed.";
          signalPassFailure();
        }
      }
    }
    
    }  // namespace
    
    // Creates an instance of the TensorFlow dialect PropagateQuantizeType pass.
    std::unique_ptr<OperationPass<ModuleOp>> CreatePropagateQuantizeTypePass() {
      return std::make_unique<PropagateQuantizeType>();
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/transforms/constant_fold_utils.cc

      // available during compilation or compilation only device for on demand
      // execution which may create a recursion if used for constant folding.
      std::string host_cpu = tensorflow::DeviceNameUtils::FullName(
          /*job=*/"localhost", /*replica=*/0, /*task=*/0, /*type=*/"CPU", /*id=*/0);
    
      absl::StatusOr<OpKernelRunner> runner = OpKernelRunner::Create(
          node_def->get()->op(), node_def->get()->name(), host_cpu, operands.size(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.3K bytes
    - Viewed (0)
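
The constant_fold_utils.cc excerpt pins constant folding to the host CPU by building a fully qualified device name with DeviceNameUtils::FullName. The helper below is a stand-alone illustration of that naming convention ("/job:<job>/replica:<r>/task:<t>/device:<type>:<id>"), not TensorFlow's implementation; FullDeviceName is a hypothetical name.

    #include <cstdio>
    #include <string>
    
    // Formats a fully qualified TensorFlow device name, e.g.
    //   /job:localhost/replica:0/task:0/device:CPU:0
    // The real code obtains this via tensorflow::DeviceNameUtils::FullName.
    std::string FullDeviceName(const std::string& job, int replica, int task,
                               const std::string& type, int id) {
      char buf[256];
      std::snprintf(buf, sizeof(buf), "/job:%s/replica:%d/task:%d/device:%s:%d",
                    job.c_str(), replica, task, type.c_str(), id);
      return std::string(buf);
    }
    
    int main() {
      // Matches the host_cpu value assembled in constant_fold_utils.cc.
      std::printf("%s\n", FullDeviceName("localhost", 0, 0, "CPU", 0).c_str());
    }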
  5. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc

        if (constant.getType().getRank() != 2) return failure();
    
        // Create a tfl.transpose op that performs ZX transpose on `input`.
        auto create_z_x_transpose_op = [&](Value input) -> Value {
          RankedTensorType input_type =
              mlir::cast<RankedTensorType>(input.getType());
          const int input_rank = input_type.getRank();
    
          // Create a 1D I32 tensor for representing the dimension permutation.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.6K bytes
    - Viewed (0)
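
The optimize_batch_matmul.cc excerpt inserts a tfl.transpose whose permutation is a 1-D i32 constant. Assuming the "ZX transpose" named in the comment means swapping the two innermost (matrix) dimensions while leaving any batch dimensions in place, that permutation can be derived as below; ZxTransposePermutation is an illustrative helper, not part of the pass.

    #include <cstdint>
    #include <iostream>
    #include <numeric>
    #include <utility>
    #include <vector>
    
    // Identity permutation with the last two dimensions swapped,
    // e.g. rank 4 -> {0, 1, 3, 2}. The pass materializes such a vector
    // as the 1-D i32 permutation operand of tfl.transpose.
    std::vector<int32_t> ZxTransposePermutation(int rank) {
      std::vector<int32_t> perm(rank);
      std::iota(perm.begin(), perm.end(), 0);
      if (rank >= 2) std::swap(perm[rank - 2], perm[rank - 1]);
      return perm;
    }
    
    int main() {
      for (int32_t d : ZxTransposePermutation(4)) std::cout << d << ' ';
      std::cout << '\n';  // prints: 0 1 3 2
    }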
  6. tensorflow/compiler/jit/xla_compile_util.cc

      // _Arg nodes, and let CompileGraph walk it. This could be optimized.
      std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
    
      // First create the actual node we care about computing.
      TF_ASSIGN_OR_RETURN(Node * main_node, graph->AddNode(node_def));
    
      // Create dummy _Arg nodes. Link these to `node` and also via a control
      // dependency edge to the _SOURCE node.
      for (int64_t i = 0, end = args.size(); i < end; ++i) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 4.6K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/stablehlo/passes/nchw_convolution_to_nhwc.cc

        Value input = op->getOperand(0);
        const TensorType new_input_tensor_type = GetTransposedTensorType(
            mlir::cast<TensorType>(input.getType()), kNchwToNhwcPermutation);
    
        auto input_transpose_op = rewriter.create<mlir::stablehlo::TransposeOp>(
            op.getLoc(), /*resultType0=*/new_input_tensor_type, /*operand=*/input,
            rewriter.getDenseI64ArrayAttr(kNchwToNhwcPermutation));
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.2K bytes
    - Viewed (0)
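
The nchw_convolution_to_nhwc.cc excerpt transposes the convolution input with the NCHW-to-NHWC permutation and computes the transposed tensor type up front. The sketch below shows how such a permutation reorders a shape; the value {0, 2, 3, 1} is the standard NCHW-to-NHWC ordering and is assumed to match kNchwToNhwcPermutation, and Permute is an illustrative helper rather than GetTransposedTensorType itself.

    #include <array>
    #include <cstdint>
    #include <iostream>
    #include <vector>
    
    // result[i] = dims[perm[i]]: applying {0, 2, 3, 1} to an NCHW shape
    // yields the corresponding NHWC shape.
    constexpr std::array<int64_t, 4> kNchwToNhwc = {0, 2, 3, 1};
    
    std::vector<int64_t> Permute(const std::vector<int64_t>& dims,
                                 const std::array<int64_t, 4>& perm) {
      std::vector<int64_t> out(dims.size());
      for (size_t i = 0; i < dims.size(); ++i) out[i] = dims[perm[i]];
      return out;
    }
    
    int main() {
      // NCHW [1, 3, 224, 224] -> NHWC [1, 224, 224, 3].
      for (int64_t d : Permute({1, 3, 224, 224}, kNchwToNhwc)) std::cout << d << ' ';
      std::cout << '\n';
    }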
  8. tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc

          if (QuantizedType::getQuantizedElementType(operand.getType())) {
            auto newTy = QuantizedType::castToExpressedType(operand.getType());
            newOperands.push_back(
                rewriter.create<TFL::DequantizeOp>(loc, newTy, operand));
            continue;
          }
    
          newOperands.push_back(operand);
        }
    
        SmallVector<Type> newResultTys;
        for (auto result : op->getResults()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.8K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/transforms/lower_globals_to_ml_program.cc

        if (globalTensor.getValue()) {
          initial_value = *globalTensor.getValue();
        } else {
          initial_value = mlir::Attribute();
        }
        opToName[globalTensor] = name;
        auto variableOp = globalBuilder.create<ml_program::GlobalOp>(
            globalTensor.getLoc(), name, globalTensor.getType(),
            globalTensor.getIsMutable(), initial_value,
            /*visibility=*/globalBuilder.getStringAttr("private"));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.cc

        b.setInsertionPoint(op);
        if (auto dq = llvm::dyn_cast<DequantizeOp>(op)) {
          auto dcast = b.create<quantfork::DequantizeCastOp>(
              dq.getLoc(), dq.getOutput().getType(), dq.getInput());
          dq.getOutput().replaceAllUsesWith(dcast);
          dq.erase();
        } else if (auto q = llvm::dyn_cast<QuantizeOp>(op)) {
          auto qcast = b.create<quantfork::QuantizeCastOp>(
              q.getLoc(), q.getOutput().getType(), q.getInput());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 02:50:01 UTC 2024
    - 3.5K bytes
    - Viewed (0)