Results 1 - 6 of 6 for Kramm (0.04 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/tf_data_optimization.td

      (TF_MapAndBatchDatasetOp $input_dataset, $other_arguments, $batch_size,
         (TF_ConstOp (GetI64ScalarElementsAttr<1>)), $drop_remainder, $f,
            $batch_output_types, $batch_output_shapes, $preserve_cardinality,
            // TODO(kramm): Should we merge batch_dataset_metadata and
            //              map_dataset_metadata?
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 1.7K bytes
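
    The pattern above fuses a separate map + batch pipeline into a single
    TF_MapAndBatchDatasetOp, supplying the constant 1 built by
    GetI64ScalarElementsAttr<1> for the extra operand (presumably
    num_parallel_calls). Below is a plain C++ sketch, not tf.data internals,
    of the element-level equivalence the fusion relies on; the container and
    function names are made up for illustration.

      #include <algorithm>
      #include <cstddef>
      #include <functional>
      #include <vector>

      using Element = int;
      using Batch = std::vector<Element>;

      // Unfused pipeline: map every element, then cut the mapped stream into
      // batches of `batch_size`.
      std::vector<Batch> MapThenBatch(const std::vector<Element>& in,
                                      const std::function<Element(Element)>& f,
                                      std::size_t batch_size) {
        std::vector<Element> mapped;
        for (Element e : in) mapped.push_back(f(e));
        std::vector<Batch> out;
        for (std::size_t i = 0; i < mapped.size(); i += batch_size)
          out.emplace_back(mapped.begin() + i,
                           mapped.begin() + std::min(i + batch_size, mapped.size()));
        return out;
      }

      // Fused pipeline: map each element while the batch is being assembled,
      // which is what the single MapAndBatchDataset op does.
      std::vector<Batch> MapAndBatch(const std::vector<Element>& in,
                                     const std::function<Element(Element)>& f,
                                     std::size_t batch_size) {
        std::vector<Batch> out;
        Batch current;
        for (Element e : in) {
          current.push_back(f(e));
          if (current.size() == batch_size) {
            out.push_back(current);
            current.clear();
          }
        }
        if (!current.empty()) out.push_back(current);  // drop_remainder == false
        return out;
      }
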
  2. tensorflow/compiler/mlir/tensorflow/transforms/decompose_resource_ops.td

         // var <- var - mom
         (TF_AssignSubVariableOp $var_resource, $mom_new)
       ]
       >;
    
    // Same as DecomposeResourceApplyFtrlV2, with l2_shrinkage set to zero.
    // TODO(kramm): Move this pattern to canonicalize.td?
    def DecomposeResourceApplyFtrl : Pat<
      (TF_ResourceApplyFtrlOp $var, $accum, $linear, $grad, $lr, $l1, $l2,
         $lr_power, $use_locking, $multiply_linear_by_lr),
    - Last Modified: Wed May 22 19:47:48 UTC 2024
    - 20.7K bytes
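
    The comment above says DecomposeResourceApplyFtrl simply forwards to the
    FtrlV2 decomposition with l2_shrinkage fixed to zero. A minimal plain C++
    sketch (illustrative only, not TensorFlow code) of why that is sound:
    l2_shrinkage enters the FTRL-V2 update only through the shrunk gradient,
    which collapses to the plain gradient when the coefficient is zero.

      #include <cassert>

      // grad_with_shrinkage = grad + 2 * l2_shrinkage * var, so a zero
      // l2_shrinkage reproduces the plain FTRL gradient.
      double ShrunkGrad(double grad, double l2_shrinkage, double var) {
        return grad + 2.0 * l2_shrinkage * var;
      }

      int main() {
        const double grad = 0.5, var = 3.0;
        assert(ShrunkGrad(grad, /*l2_shrinkage=*/0.0, var) == grad);
        return 0;
      }
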
  3. tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.cc

    static FailureOr<std::vector<int64_t>> GetTPUInfeedLayoutFromAPI(
        RankedTensorType t) {
      // Call the TPU API to determine the right infeed layout. Note that
      // this can fail if we're not running on a TPU-enabled node.
      // TODO(kramm): Move this into a separate pass. See b/184944903
      xla::Shape old_shape = xla::TypeToShape(t);
      XLA_Shape old_shape_c = {};
      XLA_Shape new_shape_c = {};
      TfTpu_ExecutorApiFn *executor = stream_executor::tpu::ExecutorApiFn();
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.1K bytes
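
    Because the layout lookup can fail off-TPU, GetTPUInfeedLayoutFromAPI
    returns a FailureOr. A minimal sketch of a hypothetical caller (the
    function name and error handling here are assumptions, not code from this
    file) that checks for failure before using the layout:

      #include <cstdint>
      #include <vector>

      #include "mlir/IR/BuiltinTypes.h"
      #include "mlir/Support/LogicalResult.h"

      // Hypothetical caller: bail out gracefully when no TPU API is available.
      mlir::LogicalResult ApplyInfeedLayout(mlir::RankedTensorType t) {
        mlir::FailureOr<std::vector<int64_t>> layout = GetTPUInfeedLayoutFromAPI(t);
        if (failed(layout)) return mlir::failure();  // not on a TPU-enabled node
        for (int64_t dim : *layout) {
          // ... use the per-dimension layout, e.g. to build a layout attribute ...
          (void)dim;
        }
        return mlir::success();
      }
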
  4. tensorflow/compiler/mlir/tensorflow/transforms/remove_unused_arguments.cc

        }
      }
    }
    
    // Erases the given results from an operation, similar to what
    // Operation::eraseArguments does (but for results).
    // This is a lengthy bit of code, since it has to recreate the operation.
    // TODO(kramm): Move this under utils/ somewhere.
    void EraseResults(Operation* op, llvm::BitVector erase) {
      assert(!op->getNumRegions());
      std::vector<Type> result_types;
      for (auto result : op->getResults()) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.9K bytes
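
    MLIR operations cannot have results removed in place, which is why
    EraseResults has to recreate the operation. A condensed sketch of that
    shape, assuming standard MLIR APIs (this is not the file's actual
    implementation):

      #include <cassert>

      #include "llvm/ADT/BitVector.h"
      #include "llvm/ADT/SmallVector.h"
      #include "mlir/IR/Builders.h"
      #include "mlir/IR/Operation.h"

      // Rebuilds `op` without the results whose bits are set in `erase`, then
      // redirects the surviving uses and deletes the old operation.
      mlir::Operation* RebuildWithoutResults(mlir::Operation* op,
                                             const llvm::BitVector& erase) {
        assert(op->getNumRegions() == 0 && "regions would have to be moved too");
        llvm::SmallVector<mlir::Type> kept_types;
        for (mlir::OpResult result : op->getResults())
          if (!erase.test(result.getResultNumber()))
            kept_types.push_back(result.getType());

        mlir::OperationState state(op->getLoc(), op->getName().getStringRef(),
                                   op->getOperands(), kept_types, op->getAttrs());
        mlir::OpBuilder builder(op);
        mlir::Operation* new_op = builder.create(state);

        unsigned next = 0;
        for (mlir::OpResult result : op->getResults())
          if (!erase.test(result.getResultNumber()))
            result.replaceAllUsesWith(new_op->getResult(next++));
        op->erase();
        return new_op;
      }
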
  5. tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_cluster_formation.cc

      // with the same value (but different attributes!) into the same tf.Const
      // definition, potentially leading to bogus _replication_info attributes. So
      // we just scrub all tf.Constants of all extra attributes.
      // TODO(kramm): Remove this once tf.Const's folder is aware of extra
      // attributes.
      auto value_str_attr = StringAttr::get(&getContext(), "value");
      getOperation().walk([&](mlir::TF::ConstOp cst) {
    - Last Modified: Thu May 02 22:03:30 UTC 2024
    - 39.3K bytes
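
    The walk above visits every tf.Const in the module. One way the scrubbing
    described in the comment might look (a sketch assuming standard MLIR
    attribute APIs, not necessarily the pass's exact code) is to drop every
    attribute other than "value":

      // Inside the pass: remove all attributes except "value" so constant
      // folding cannot be confused by stale _replication_info and friends.
      auto value_str_attr = mlir::StringAttr::get(&getContext(), "value");
      getOperation().walk([&](mlir::TF::ConstOp cst) {
        // Copy the attribute list first; erasing while iterating over
        // getAttrs() would invalidate the range.
        for (mlir::NamedAttribute attr : llvm::to_vector(cst->getAttrs()))
          if (attr.getName() != value_str_attr) cst->removeAttr(attr.getName());
      });
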
  6. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc

        // Which dimensions (in the input) the two offset "columns" map to.
        SmallVector<int64_t, 2> start_index_map({num_dims - 2, num_dims - 1});
    
        // Gather the diagonal entries.
        // TODO(kramm): For a single diagonal, this might be slower than the
        //              mask + sum approach. Special-case num_diags==1?
        auto dims_attr = GatherDimensionNumbersAttr::get(
            rewriter.getContext(),
    - Last Modified: Tue Jun 11 20:00:43 UTC 2024
    - 291.8K bytes
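
    The TODO weighs the gather-based extraction against a mask + sum
    formulation for the single-diagonal case: the gather touches only the n
    diagonal entries but pays for the gather machinery, while mask + sum reads
    all n*n elements with no index computation. A plain C++ sketch of the two
    formulations (not the XLA lowering itself):

      #include <cstddef>
      #include <vector>

      // Gather-style: index the diagonal of an n x n row-major matrix directly.
      std::vector<double> DiagByGather(const std::vector<double>& m, std::size_t n) {
        std::vector<double> diag(n);
        for (std::size_t i = 0; i < n; ++i) diag[i] = m[i * n + i];
        return diag;
      }

      // Mask + sum style: multiply by an identity mask and reduce each row.
      std::vector<double> DiagByMaskSum(const std::vector<double>& m, std::size_t n) {
        std::vector<double> diag(n, 0.0);
        for (std::size_t i = 0; i < n; ++i)
          for (std::size_t j = 0; j < n; ++j)
            diag[i] += m[i * n + j] * (i == j ? 1.0 : 0.0);
        return diag;
      }
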