Results 1 - 4 of 4 for sgd (0.03 sec)

  1. tensorflow/compiler/mlir/tensorflow_to_stablehlo/python/integration_test/tensorflow_to_stablehlo_test.py

      class AddOneModel(tf.keras.Model):
        def call(self, x):
          return x + 1
    
      model = AddOneModel()
    
      x_train = tf.constant([1, 2, 3, 4, 5], dtype=tf.float32)
      y_train = tf.constant([2, 3, 4, 5, 6], dtype=tf.float32)
    
      model.compile(optimizer='sgd', loss='mse')
      model.fit(x_train, y_train, epochs=1)
    
      path = tempdir + '/add_one_model'
      model.save(path)
      return path
    
    
    class TensorflowToStableHLOTest(test.TestCase):
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 22:58:42 UTC 2024
    - 2.7K bytes
    - Viewed (0)
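    For context, the excerpt above starts mid-helper. A self-contained sketch of the
    same pattern (the helper name create_saved_model and the tempfile usage are
    assumptions; the model, training data, and 'sgd'/'mse' settings mirror the excerpt):

      import tempfile
      import tensorflow as tf

      class AddOneModel(tf.keras.Model):
        # Parameter-free model: the forward pass just adds one.
        def call(self, x):
          return x + 1

      def create_saved_model(tempdir: str) -> str:  # hypothetical name
        model = AddOneModel()
        x_train = tf.constant([1, 2, 3, 4, 5], dtype=tf.float32)
        y_train = tf.constant([2, 3, 4, 5, 6], dtype=tf.float32)
        # 'sgd' selects plain stochastic gradient descent; the model has
        # no trainable variables, so fit() only exercises the train loop.
        model.compile(optimizer='sgd', loss='mse')
        model.fit(x_train, y_train, epochs=1)
        path = tempdir + '/add_one_model'
        model.save(path)  # SavedModel input for the StableHLO converter
        return path

      saved_path = create_saved_model(tempfile.mkdtemp())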
  2. tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td

      );
    }
    
    
    def TF_XlaSparseDenseMatmulGradWithSgdAndStaticBufferSizeOp : TF_Op<"XlaSparseDenseMatmulGradWithSgdAndStaticBufferSize", [Pure]> {
      let summary = "An XLA op which performs the SGD optimizer update for the dense-sparse matrix multiplication.";
    
      let arguments = (ins
        TF_Int32Tensor:$row_pointers,
        TF_Int32Tensor:$sorted_sample_ids,
        TF_Int32Tensor:$sorted_token_ids,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 04:08:35 UTC 2024
    - 90.5K bytes
    - Viewed (0)
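    The declaration above only lists the op's TPU-specific input buffers
    (row_pointers, sorted_sample_ids, ...). As a rough illustration of the SGD
    update rule named in the summary, not the op's real layout, a NumPy sketch:

      import numpy as np

      def sgd_sparse_update(table, row_ids, grads, lr):
        # Plain SGD applied only to the rows a sparse lookup touched:
        #   table[r] <- table[r] - lr * grad_for_r
        # np.subtract.at is unbuffered, so repeated row ids (the same
        # token appearing in several samples) accumulate correctly.
        np.subtract.at(table, row_ids, lr * grads)
        return table

      table = np.ones((8, 4), dtype=np.float32)         # 8 rows, dim 4
      row_ids = np.array([1, 3, 1])                     # row 1 hit twice
      grads = np.full((3, 4), 0.5, dtype=np.float32)
      sgd_sparse_update(table, row_ids, grads, lr=0.1)  # row 1 -> 0.9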
  3. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

      let results = (outs);
    }
    
    def TF_LoadTPUEmbeddingStochasticGradientDescentParametersOp : TF_Op<"LoadTPUEmbeddingStochasticGradientDescentParameters", [TF_MustExecute, TF_TPUEmbeddingReadEffect]> {
      let summary = "Load SGD embedding parameters.";
    
      let description = [{
    An op that loads optimization parameters into HBM for embedding. Must be
    preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes
    - Viewed (0)
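    Plain SGD keeps no slot variables (no moments or accumulators), so the only
    state this op transfers into HBM is the parameter table itself. A sketch of a
    call through tf.raw_ops, with illustrative shapes and table name; as the
    description says, it only runs after ConfigureTPUEmbeddingHost on a TPU host:

      import tensorflow as tf

      vocab_size, dim = 1024, 16
      initial_table = tf.zeros([vocab_size, dim], tf.float32)

      # 'parameters' is the sole state tensor for SGD; optimizers such
      # as Adam have companion Load*Parameters ops taking extra slots.
      tf.raw_ops.LoadTPUEmbeddingStochasticGradientDescentParameters(
          parameters=initial_table,
          num_shards=1,
          shard_id=0,
          table_name='my_table',  # illustrative
      )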
  4. RELEASE.md

        *   Added `tf.keras.optimizers.experimental.Optimizer`. The reworked
            optimizer gives more control over different phases of optimizer calls,
            and is easier to customize. We provide Adam, SGD, Adadelta, AdaGrad and
            RMSprop optimizers based on
            `tf.keras.optimizers.experimental.Optimizer`. Generally the new
            optimizers work in the same way as the old ones, but support new
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 730.3K bytes
    - Viewed (0)
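    For reference, the reworked optimizers keep the familiar gradient-tape
    workflow; a minimal single-step sketch with the experimental SGD (from
    TF 2.11 on, this class became the default tf.keras.optimizers.SGD):

      import tensorflow as tf

      opt = tf.keras.optimizers.experimental.SGD(learning_rate=0.01,
                                                 momentum=0.9)
      var = tf.Variable(2.0)
      with tf.GradientTape() as tape:
        loss = var ** 2                       # d(loss)/d(var) = 4.0
      grads = tape.gradient(loss, [var])
      opt.apply_gradients(zip(grads, [var]))  # var: 2.0 -> 1.96
      print(var.numpy())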