Results 21 - 30 of 237 for Operands (0.32 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/cluster_ops_by_policy.cc

      constraints.Walk([](Value value, ValueConstraint constraint) {
        for (OpOperand &operand : value.getUses())
          operand.getOwner()->emitRemark(
              llvm::formatv("operand #{0} constrained to: {1}",
                            operand.getOperandNumber(), constraint));
      });
    }
    
    void EmitInputsConstraintsRemarks(func::FuncOp func,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 27.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.h

          ConversionPatternRewriter& rewriter) const final {
        {
          OperandRange operands = scatter_op.getInputs();
          Value indices = scatter_op.getScatterIndices();
          OperandRange updates = scatter_op.getUpdates();
          if (operands.size() != 1 || updates.size() != 1) return failure();
    
          ShapedType operand_type = mlir::cast<ShapedType>(operands[0].getType());
          ShapedType indices_type = mlir::cast<ShapedType>(indices.getType());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.cc

          // For simplicity, we don't consolidate these ops when all the
          // non-canonicalizable operands are adjacent.
          new_values.push_back(
              rewriter
                  .create<fallback_async::CoreRTTensorHandleToFallbackTensorOp>(
                      op.getLoc(), rewriter.getType<fallback::TFTensorType>(),
                      operand, op->getAttrOfType<mlir::StringAttr>("device"))
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 08 01:19:25 UTC 2023
    - 15.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/quantization/quantization_context.cc

                    "Has to fallback values which might introduce errors.\n");
    
      // Use the first immutable state to quantize the rest of the operands and results.
      if (!immutable_states.empty()) return immutable_states.front()->params;
    
      // If there are no immutable states, use the operand's state if it is the
      // only operand and has parameters propagated.
      if (op->getNumOperands() == 1 && mutable_operands_num == 1) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 08 01:38:03 UTC 2024
    - 13.1K bytes
    - Viewed (0)
  5. src/fmt/print.go

    }
    
    // Print formats using the default formats for its operands and writes to standard output.
    // Spaces are added between operands when neither is a string.
    // It returns the number of bytes written and any write error encountered.
    func Print(a ...any) (n int, err error) {
    	return Fprint(os.Stdout, a...)
    }
    
    // Sprint formats using the default formats for its operands and returns the resulting string.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 13 21:22:43 UTC 2024
    - 31.8K bytes
    - Viewed (0)
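    A minimal usage sketch of the spacing rule documented in the Print snippet above (not part of the indexed file): a space is inserted between two adjacent operands only when neither of them is a string.

        package main

        import "fmt"

        func main() {
            // "a" and "b" are both strings, so no space separates them; the same
            // applies between "b" and 1. Only 1 and 2 (two non-strings) get a space.
            n, err := fmt.Print("a", "b", 1, 2, "\n") // writes "ab1 2\n" to stdout
            fmt.Println(n, err)                       // 6 <nil>
        }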
  6. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_communication.cc

      if (flatten_tuple) {
        auto operands = llvm::to_vector(values);
        operands.push_back(token);
        return operands;
      }
    
      auto value = values[0];
      // If value with token already exists, reuse it.
      auto it = rewritten_values.find(value);
      if (it != rewritten_values.end()) return {it->getSecond()};
    
      auto create_tuple = [&](ArrayRef<Value> operands) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 40.5K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc

      }
    
      std::unique_ptr<OpQuantSpec> spec = GetUniformOpQuantSpec(op);
      absl::flat_hash_set<int> operands = spec->quantizable_operands;
      int quant_dim = -1;
      if (enable_per_channel_quantization && operands.size() == 1) {
        quant_dim = spec->coeff_op_quant_dim[*(operands.begin())];
      }
      attrs.push_back(rewriter.getNamedAttr("rhs_quantization_axis",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 18.7K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc

        }
    
        auto fused_loc = rewriter.getFusedLoc(locations);
    
        // The fused contraction has the same operands as the original contraction
        // with `bias` from the BiasAddOp appended.
        SmallVector<Value, 4> operands(contraction.operand_begin(),
                                       contraction.operand_end());
        operands.push_back(bias_add.getBias());
    
        // The fused contraction has the same attributes as the original
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.9K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/transforms/tensor_array_ops_decomposition.cc

            decomposed_partitioned_call_callees) {
      for (OpOperand& operand : op.getOpOperands()) {
        if (getElementTypeOrSelf(operand.get().getType()).isa<TF::ResourceType>()) {
          return op.emitOpError()
                 << "found unexpected type " << operand.get().getType()
                 << " of operand #" << operand.getOperandNumber()
                 << ", resource type operands are expected to have been "
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 02 20:41:19 UTC 2023
    - 40.2K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h

    // immutable because they are from quantization-aware training.
    //
    // The algorithm traverses each op and sets the quantization parameters of its
    // operands and results, according to its quantization specification, and then
    // adds the operands and results to the worklist. If there are any conflicts
    // (for example, there are quantization parameters propagated from the previous
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 20 11:42:17 UTC 2024
    - 16.8K bytes
    - Viewed (0)
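    The propagation loop described in this header comment can be illustrated with a small worklist example. The sketch below is written in Go for brevity and uses entirely hypothetical names (opNode, qParams, propagate); it is not the QuantizationDriver API. Each op's spec parameters are assigned to its operand and result values, immutable parameters win conflicts, and ops sharing a value whose parameters changed are re-queued.

        package main

        import "fmt"

        // Hypothetical stand-ins for illustration only; not the QuantizationDriver types.
        type qParams struct {
            scale     float64
            immutable bool // e.g. fixed by quantization-aware training
        }

        type opNode struct {
            name   string
            values []string // names of this op's operand and result values
            spec   qParams  // parameters proposed by the op's quantization spec
        }

        // propagate pops ops from a worklist, sets parameters on each op's operand
        // and result values, and re-queues any op that shares a value whose
        // parameters changed. A value's parameters change at most twice (first
        // assignment, then an upgrade to immutable), so the worklist drains.
        func propagate(ops []opNode) map[string]qParams {
            params := map[string]qParams{}
            work := append([]opNode(nil), ops...)
            for len(work) > 0 {
                op := work[0]
                work = work[1:]
                for _, v := range op.values {
                    cur, seen := params[v]
                    if !seen || (op.spec.immutable && !cur.immutable) {
                        params[v] = op.spec
                        // Revisit other ops touching this value so they observe
                        // the updated parameters.
                        for _, other := range ops {
                            if other.name != op.name && contains(other.values, v) {
                                work = append(work, other)
                            }
                        }
                    }
                }
            }
            return params
        }

        func contains(vs []string, v string) bool {
            for _, x := range vs {
                if x == v {
                    return true
                }
            }
            return false
        }

        func main() {
            ops := []opNode{
                {name: "conv", values: []string{"x", "y"}, spec: qParams{scale: 0.5}},
                {name: "relu", values: []string{"y", "z"}, spec: qParams{scale: 0.25, immutable: true}},
            }
            // "y" is shared: conv proposes 0.5, relu proposes an immutable 0.25, so 0.25 wins.
            fmt.Println(propagate(ops))
        }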