Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 71 - 80 of 1,997 for Reserve (4.2 sec)

  1. tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc

                                                int index,
                                                const QuantParamsEntry &info) {
      if (info.params_size() == 0) return;
    
      SmallVector<APFloat, 4> min_maxs;
      min_maxs.reserve(info.params_size() * 2);
      for (const auto &param : info.params()) {
        llvm::APFloat min(param.min_max().min());
        llvm::APFloat max(param.min_max().max());
        min_maxs.push_back(min);
        min_maxs.push_back(max);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 08 10:41:08 UTC 2024
    - 9.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/transforms/lift_tflite_flex_ops.cc

                const tensorflow::protobuf::RepeatedPtrField<
                    tensorflow::OpDef::ArgDef>& args,
                llvm::StringRef attr_name) {
              std::vector<int32_t> values;
              values.reserve(args.size());
              for (const auto& arg : args) {
                auto range = arg_ranges.at(arg.name());
                values.push_back(range.second - range.first);
              }
              auto attr_value =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_partitioning.cc

                 << partitioned_input.getNumOperands();
        }
    
        builder.setInsertionPoint(assign_var);
        llvm::SmallVector<Type, 4> partitioned_output_types;
        partitioned_output_types.reserve(num_cores_per_replica);
        for (int i = 0; i < num_cores_per_replica; ++i) {
          const auto& input = packed_input ? inputs[0] : inputs[i];
          partitioned_output_types.push_back(GetResourceSubtype(input.getType()));
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 11.8K bytes
    - Viewed (0)
  4. tensorflow/cc/gradients/linalg_grad.cc

      // Get the axis (may be positive, negative or zero) for each of the reduced
      // labels. If the same label appears multiple times, get the left-most axis.
      std::vector<int> reduced_axes;
      reduced_axes.reserve(reduced_subs.size());
      for (const char s : reduced_subs) {
        auto axis = EinsumGetAxisFromLabel(subscripts, s);
        if (!axis.has_value()) {
          // Should never happen.
          scope.UpdateStatus(errors::Internal(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 07 23:11:54 UTC 2022
    - 20.4K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

                                             getElementTypeOrSelf(filter_type))
                                             .getScales();
        std::vector<double> result_scales;
        result_scales.reserve(filter_scales.size());
    
        for (const double filter_scale : filter_scales) {
          result_scales.push_back(input_scale * filter_scale);
        }
    
        const ArrayRef<int64_t> zero_points =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 06:04:36 UTC 2024
    - 41.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/functional_control_flow_to_regions.cc

        auto inputs = func.getFunctionType().getInputs();
        entry->addArguments(inputs, SmallVector<Location>(inputs.size(), loc));
        args = entry->getArguments();
      }
      llvm::SmallVector<Value, 4> casted_args;
      casted_args.reserve(func.getNumArguments());
      for (const auto& ArgAndType : zip(args, func.getFunctionType().getInputs())) {
        Value arg = std::get<0>(ArgAndType);
        Type expected_type = std::get<1>(ArgAndType);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11K bytes
    - Viewed (0)
  7. src/runtime/sys_linux_loong64.s

    TEXT runtime·callCgoMmap(SB),NOSPLIT,$0
    	MOVV	addr+0(FP), R4
    	MOVV	n+8(FP), R5
    	MOVW	prot+16(FP), R6
    	MOVW	flags+20(FP), R7
    	MOVW	fd+24(FP), R8
    	MOVW	off+28(FP), R9
    	MOVV	_cgo_mmap(SB), R13
    	SUBV	$16, R3		// reserve 16 bytes for sp-8 where fp may be saved.
    	JAL	(R13)
    	ADDV	$16, R3
    	MOVV	R4, ret+32(FP)
    	RET
    
    // func sysMunmap(addr unsafe.Pointer, n uintptr)
    TEXT runtime·sysMunmap(SB),NOSPLIT|NOFRAME,$0
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Aug 25 20:58:13 UTC 2023
    - 14.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_tf_xla_call_module_to_stablehlo_pass.cc

        // The XlaCallModuleOp's input is tensor<*xf32> while the function's
        // argument type is tensor<1x2f32>.
        SmallVector<Value, 4> casted_operands;
        casted_operands.reserve(main_fn.getNumArguments());
        for (const auto &operand_and_type :
             zip(op.getOperands(), main_fn.getFunctionType().getInputs())) {
          Value operand = std::get<0>(operand_and_type);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jan 25 09:43:18 UTC 2024
    - 10.9K bytes
    - Viewed (0)
  9. tensorflow/c/tf_tensor.cc

                                  const tensorflow::TensorShape& shape) {
      static char empty;
      int64_t nelems = 1;
      std::vector<int64_t> dims;
      auto shape_dims = shape.dims();
      dims.reserve(shape_dims);
      for (int i = 0; i < shape_dims; ++i) {
        dims.push_back(shape.dim_size(i));
        nelems *= shape.dim_size(i);
      }
      CHECK_EQ(nelems, 0);
      return TF_NewTensor(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Apr 14 21:57:32 UTC 2024
    - 11.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc

      std::vector<Value> sliced_rhs =
          sliceInput(input_rhs, bcast.y_batch_size(), loc, rewriter);
    
      // Compute (single batch) MatMul for each output batch.
      std::vector<Value> matmuls;
      matmuls.reserve(bcast.output_batch_size());
      for (int batch_idx : llvm::seq<int>(0, bcast.output_batch_size())) {
        int lhs_batch_idx, rhs_batch_idx;
        if (bcast.IsBroadcastingRequired()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.6K bytes
    - Viewed (0)
Back to top