Results 1 - 6 of 6 for mse (0.04 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_custom_aggregation_ops.mlir

    // HISTOGRAM-MSE-BRUTEFORCE-CHECK-NEXT:  "tf.AddV2"
    // HISTOGRAM-MSE-BRUTEFORCE-CHECK-NEXT:  return
    
    // HISTOGRAM-MSE-BRUTEFORCE-CHECK: func @composite_conv2d_with_relu6_fn
    // HISTOGRAM-MSE-BRUTEFORCE-CHECK-NEXT:  "tf.Conv2D"
    // HISTOGRAM-MSE-BRUTEFORCE-CHECK-NEXT:  "tf.Relu6"
    // HISTOGRAM-MSE-BRUTEFORCE-CHECK-NEXT:  return
    
    // CalibrationOptions(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 32.1K bytes
    - Viewed (0)
  2. src/runtime/arena_test.go

    		runSubTestUserArenaNew(t, sp, true)
    
    		spm := &smallPointerMix{sp, 5, nil, [11]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}}
    		runSubTestUserArenaNew(t, spm, true)
    
    		mse := new(mediumScalarEven)
    		for i := range mse {
    			mse[i] = 121
    		}
    		runSubTestUserArenaNew(t, mse, true)
    
    		mso := new(mediumScalarOdd)
    		for i := range mso {
    			mso[i] = 122
    		}
    		runSubTestUserArenaNew(t, mso, true)
    
    		mpe := new(mediumPointerEven)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 13.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow_to_stablehlo/python/integration_test/tensorflow_to_stablehlo_test.py

          return x + 1
    
      model = AddOneModel()
    
      x_train = tf.constant([1, 2, 3, 4, 5], dtype=tf.float32)
      y_train = tf.constant([2, 3, 4, 5, 6], dtype=tf.float32)
    
      model.compile(optimizer='sgd', loss='mse')
      model.fit(x_train, y_train, epochs=1)
    
      path = tempdir + '/add_one_model'
      model.save(path)
      return path
    
    
    class TensorflowToStableHLOTest(test.TestCase):
    
      def test_saved_model_to_stablehlo(self):
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 22:58:42 UTC 2024
    - 2.7K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto

        // Use the histogram mid values that minimize MSE error.
    // This is a very slow algorithm because it computes the error for every
    // pair of histogram mid values. Therefore the value of num_bins is
    // recommended to be 256 or less.
        CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE = 4;
        // Use the histogram mid values that minimize MSE error.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 14.3K bytes
    - Viewed (0)
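    The proto comment above describes CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE in plain terms: every pair of histogram mid values is tried as a candidate quantization range, and the pair with the smallest MSE wins, which is why the cost grows quadratically with num_bins. A minimal NumPy sketch of that idea (illustrative only, not the TensorFlow implementation; the function name, count-weighted error, and 8-bit assumption are this example's own) might look like:

        import numpy as np

        def histogram_mse_bruteforce(hist, mids, num_bits=8):
            """Try every (min, max) pair of bin mid values; return the best range.

            hist: per-bin sample counts (np.ndarray); mids: bin mid values (np.ndarray).
            O(num_bins^2) fake-quantization passes, hence the num_bins <= 256 advice.
            """
            levels = 2 ** num_bits - 1
            best = (np.inf, mids[0], mids[-1])
            for i in range(len(mids)):
                for j in range(i + 1, len(mids)):
                    qmin, qmax = mids[i], mids[j]
                    scale = (qmax - qmin) / levels
                    # Fake-quantize every bin mid into the candidate range.
                    q = np.clip(np.round((mids - qmin) / scale), 0, levels)
                    dq = q * scale + qmin
                    # Weight each bin's squared error by its sample count.
                    mse = np.sum(hist * (mids - dq) ** 2) / np.sum(hist)
                    if mse < best[0]:
                        best = (mse, qmin, qmax)
            return best  # (mse, min_value, max_value)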
  5. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py

      def _get_min_max_value_by_expanding_range(
          self, start_idx: int
      ) -> tuple[float, float]:
        """Starting from start_idx, expand left and right alternately to find the min value of mse loss.
    
        Args:
          start_idx: Index to start quantization.
    
        Returns:
          (min_value, max_value): Min and max calculated.
        """
        # Tuple of (mse_error, quant_min, quant_max).
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 11 19:29:56 UTC 2024
    - 14.7K bytes
    - Viewed (0)
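    The docstring above describes a complementary search strategy: starting from a single histogram bin, the candidate range is widened one bin at a time, alternating left and right, and the range with the smallest MSE loss is kept. A rough sketch of that expansion loop (the mse_fn parameter is a hypothetical stand-in for the module's own error computation; this is not the real method body):

        import numpy as np

        def expand_range_for_min_mse(hist, mids, start_idx, mse_fn):
            """Widen [left, right] around start_idx, alternating sides, and keep
            the (min_value, max_value) pair whose MSE (per mse_fn) is smallest."""
            left = right = start_idx
            best_mse, best_min, best_max = np.inf, mids[left], mids[right]
            expand_left = True
            while left > 0 or right < len(mids) - 1:
                # Alternate sides; fall back to the other side at a histogram edge.
                if (expand_left and left > 0) or right == len(mids) - 1:
                    left -= 1
                else:
                    right += 1
                expand_left = not expand_left
                mse = mse_fn(hist, mids, mids[left], mids[right])
                if mse < best_mse:
                    best_mse, best_min, best_max = mse, mids[left], mids[right]
            return best_min, best_max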
  6. RELEASE.md

    *   Update metric name to always reflect what the user has given in compile.
        Affects the following cases:
    *   When name is given as 'accuracy'/'crossentropy'
    *   When an aliased function name is used, e.g. 'mse'
        *   Removing the `weighted` prefix from weighted metric names.
        *   Allow non-Tensors through v2 losses.
        *   Add v2 sparse categorical crossentropy metric.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 730.3K bytes
    - Viewed (0)