Results 11 - 16 of 16 for Platen (0.07 sec)

  1. tensorflow/compiler/jit/cluster_scoping_pass.cc

    // Node_Y will receive both scopes "unstage" and "stage", while Node_X receives
    // only scope "stage".  The semantics of scope "unstage" are preserved even
    // though scope "stage" is appended later.  As a result, Node_X and Node_Y will
    // be put into different clusters.
    //
    //                Unstage -> Node_Y (scope "unstage & stage")
    //                              |
    //                              V
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 5.7K bytes
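
The excerpt above explains that appending a scope preserves the scopes a node already carries, so Node_X and Node_Y end up with different scope sets and therefore in different clusters. A minimal standalone C++ sketch of that grouping idea, using hypothetical helpers (AppendScope, ClusterKey) rather than the pass's real API:

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    // Each node keeps every scope it has received; appending never erases.
    struct Node {
      std::string name;
      std::vector<std::string> scopes;
    };

    void AppendScope(Node& n, const std::string& scope) { n.scopes.push_back(scope); }

    // Cluster key: the full scope sequence.  Nodes whose sequences differ land
    // in different clusters, which is the outcome described in the comment above.
    std::string ClusterKey(const Node& n) {
      std::string key;
      for (const std::string& s : n.scopes) key += s + "&";
      return key;
    }

    int main() {
      Node x{"Node_X", {}}, y{"Node_Y", {}};
      AppendScope(y, "unstage");  // Node_Y is reachable from the Unstage op
      AppendScope(x, "stage");    // both nodes are reachable from the Stage op
      AppendScope(y, "stage");
      std::map<std::string, std::vector<std::string>> clusters;
      clusters[ClusterKey(x)].push_back(x.name);
      clusters[ClusterKey(y)].push_back(y.name);
      std::cout << "clusters: " << clusters.size() << "\n";  // prints "clusters: 2"
      return 0;
    }
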
  2. tensorflow/compiler/mlir/tensorflow/transforms/constant_fold_utils.cc

        return false;
      }
    
      // If any of the result types are variants, don't try to constant fold them.
      // This creates opaque variant constants which lose information and would
      // require "raising" later.
      for (const Type type : inst->getResultTypes()) {
        if (const TensorType tensor_type = mlir::dyn_cast<TensorType>(type)) {
          if (mlir::isa<VariantType>(tensor_type.getElementType())) {
            return false;
          }
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.3K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc

    // with appropriate shape to match the shape of the XlaDotV2 result.
    // We didn't apply XlaEinsum or XlaDotV2 for this work, since it would lose
    // the chance for constant folding later. We could try to add some
    // postprocessing passes later to further optimize the graph after constant
    // folding.
    Value CreateZeroPointPartialOffsetXlaDotV2(
        OpBuilder &builder, Location loc, Value tensor,
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 47.1K bytes
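
The excerpt above builds a zero-point partial offset whose shape matches the XlaDotV2 result. As a generic illustration only (standard quantized-dot algebra, not necessarily the exact term this helper emits), expanding (x_q - zp_x) . (w_q - zp_w) leaves offset terms such as zp_w * sum(x_q); a scalar-shaped C++ sketch with a hypothetical ZeroPointPartialOffset helper:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Expanding (x_q - zp_x) . (w_q - zp_w) leaves x_q . w_q plus zero-point
    // offset terms.  This computes one of them, zp_w * sum(x_q); the real pass
    // additionally has to broadcast the offset to the XlaDotV2 result shape.
    int64_t ZeroPointPartialOffset(const std::vector<int32_t>& x_q, int32_t zp_w) {
      int64_t sum = 0;
      for (int32_t v : x_q) sum += v;
      return static_cast<int64_t>(zp_w) * sum;
    }

    int main() {
      std::vector<int32_t> x_q = {12, 7, 3};
      std::cout << ZeroPointPartialOffset(x_q, 5) << "\n";  // prints 110
      return 0;
    }
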
  4. tensorflow/compiler/mlir/tensorflow/utils/export_utils.cc

        if (stateless && stateless.getValue())
          *node_def->mutable_op() = "Stateless" + node_def->op();
      }
    
      // Add inputs to the NodeDef based on the number of operands. This is required
      // because, when edges are later added to the Node using Graph::AddEdge, the
      // associated NodeDef is not updated.
      for (int i = 0, e = inst->getNumOperands(); i < e; ++i) {
        node_def->add_input();
      }
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 19.7K bytes
  5. tensorflow/c/kernels_experimental.cc

        // because a race condition can happen between this and another thread that
        // turns off some variable's `copy_on_read_mode` after this thread enables
        // sparse access; when a later function sees `copy_on_read_mode` is off, it
        // will try to lock the variable again for updating `copy_on_read_mode` and
        // cause a deadlock, since the variable mutex is non-reentrant.
        for (auto* var : vars) {
    - Last Modified: Tue Apr 23 06:12:29 UTC 2024
    - 30.9K bytes
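
The excerpt above warns that re-locking a variable's non-reentrant mutex on the same thread deadlocks. A minimal standalone C++ sketch (not the TensorFlow implementation; Var and EnableSparseAccess are made-up names) of the safe pattern, taking each lock exactly once in a fixed order:

    #include <algorithm>
    #include <functional>
    #include <iostream>
    #include <mutex>
    #include <vector>

    struct Var {
      std::mutex mu;  // non-reentrant: relocking it on the same thread deadlocks
      bool copy_on_read_mode = false;
    };

    // Acquire every variable's lock exactly once, in a fixed (pointer) order,
    // then flip the mode.  Trying to lock a variable a second time inside the
    // loop is the deadlock the comment above warns about.
    void EnableSparseAccess(std::vector<Var*>& vars) {
      std::sort(vars.begin(), vars.end(), std::less<Var*>());
      std::vector<std::unique_lock<std::mutex>> locks;
      locks.reserve(vars.size());
      for (Var* v : vars) locks.emplace_back(v->mu);
      for (Var* v : vars) v->copy_on_read_mode = true;
    }  // all locks are released here

    int main() {
      Var a, b;
      std::vector<Var*> vars = {&b, &a};
      EnableSparseAccess(vars);
      std::cout << a.copy_on_read_mode << b.copy_on_read_mode << "\n";  // prints "11"
      return 0;
    }
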
  6. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc

        Value n_plus_y = rewriter.create<AddOp>(loc, iotaN, y);
    
        // GatherOp is happy to let us index out-of-bounds values, but those
        // values will be undefined, so we mask them later. Set up the boolean
        // expression that tells us which entries in the output shape are out of
        // bounds and thus become the padding_value.
        Value x_in_bounds = rewriter.create<AndOp>(
            loc,
    - Last Modified: Tue Jun 11 20:00:43 UTC 2024
    - 291.8K bytes
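
The excerpt above masks gather results whose indices are out of bounds and replaces them with padding_value. A minimal standalone C++ sketch of that select-by-mask pattern on plain arrays (GatherWithPadding is a hypothetical name, not the pass's actual HLO lowering):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Gather data[index] for each index, but substitute padding_value for any
    // out-of-bounds read, mirroring the in-bounds mask built in the excerpt above.
    std::vector<float> GatherWithPadding(const std::vector<float>& data,
                                         const std::vector<int64_t>& indices,
                                         float padding_value) {
      std::vector<float> out;
      out.reserve(indices.size());
      for (int64_t i : indices) {
        const bool in_bounds = i >= 0 && i < static_cast<int64_t>(data.size());
        out.push_back(in_bounds ? data[static_cast<std::size_t>(i)] : padding_value);
      }
      return out;
    }

    int main() {
      const std::vector<float> data = {1.f, 2.f, 3.f};
      const std::vector<int64_t> idx = {0, 2, 5, -1};
      for (float v : GatherWithPadding(data, idx, 0.f)) std::cout << v << " ";
      std::cout << "\n";  // prints "1 3 0 0"
      return 0;
    }
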