Results 131 - 138 of 138 for relu6 (0.07 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

          padding: str = 'SAME',
          has_func_alias: bool = False,
      ) -> module.Module:
        class ConvModel(module.Module):
          """A simple model with a single conv2d, bias and relu."""
    
          def __init__(self):
            self.out_channel_size = filter_shape[-1]
    
            # This ensures filters will have different value range per out channel
            self.filters = np.stack(
                [
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
    - Viewed (0)
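    The excerpt above sets up a conv2d + bias + activation model whose filters get a different value range per output channel, so that per-channel quantization parameters differ. A rough, self-contained sketch of that pattern follows; the class name, shapes, and the relu6 activation are illustrative assumptions, not the actual test code.

      import numpy as np
      import tensorflow as tf

      class ToyConvModel(tf.Module):
        """A single conv2d followed by bias add and relu6 (illustrative only)."""

        def __init__(self, filter_shape=(2, 3, 3, 2)):
          super().__init__()
          self.out_channel_size = filter_shape[-1]
          # Give each output channel a different value range, as the test does.
          self.filters = np.stack(
              [
                  np.random.uniform(low=-(i + 1), high=i + 1, size=filter_shape[:-1])
                  for i in range(self.out_channel_size)
              ],
              axis=-1,
          ).astype(np.float32)
          self.bias = np.random.uniform(
              low=0, high=10, size=(self.out_channel_size,)
          ).astype(np.float32)

        @tf.function(input_signature=[tf.TensorSpec([1, 8, 8, 3], tf.float32)])
        def __call__(self, x):
          out = tf.nn.conv2d(x, self.filters, strides=[1, 1, 1, 1], padding='SAME')
          out = tf.nn.bias_add(out, self.bias)
          return tf.nn.relu6(out)

      model = ToyConvModel()
      print(model(tf.random.uniform([1, 8, 8, 3])).shape)  # (1, 8, 8, 2)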
  2. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

          dilations: Sequence[int] = (1, 1, 1, 1),
          padding: str = 'SAME',
      ):
        class DepthwiseConvModel(module.Module):
          """A simple model with a single depthwise conv2d, bias and relu."""
    
          def __init__(self):
            self.out_channel_size = filter_shape[2] * filter_shape[3]
    
            # This ensures filters will have different value range per out channel
            self.filters = np.stack(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
    - Viewed (0)
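    The depthwise variant above computes out_channel_size as filter_shape[2] * filter_shape[3] because a depthwise conv2d filter of shape [fh, fw, in_channels, channel_multiplier] produces in_channels * channel_multiplier output channels. A minimal check, with made-up shapes:

      import tensorflow as tf

      x = tf.random.uniform([1, 8, 8, 3])        # 3 input channels
      filters = tf.random.uniform([2, 2, 3, 4])  # channel_multiplier = 4
      y = tf.nn.depthwise_conv2d(x, filters, strides=[1, 1, 1, 1], padding='SAME')
      print(y.shape)  # (1, 8, 8, 12), i.e. 3 * 4 output channels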
  3. tensorflow/compiler/mlir/tfr/python/tfr_gen_test.py

      y = _tfr_quant_raw_data(x)
      s, z = _tfr_quant_qparam(x)
      s = _tfr_quant_scale_factor(1.0, [s, s])
      s = _tfr_quant_scale_factor(1.0, [s])
      y = math_ops.Sub(y, z)
      qmin, qmax = _tfr_quant_act_range('RELU', 1.0, 0)
      (qmin, qmax)  # pylint: disable=pointless-statement
      d = _tfr_quant_rescale(y, s, 0)
      e = math_ops.Cast(x=d, DstT=dtypes.int16)
      f = math_ops.Cast(x=e, DstT=dtypes.int8)
      return f
    
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 13 16:33:28 UTC 2021
    - 28.8K bytes
    - Viewed (0)
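    The TFR test above walks through the usual requantization steps: take the raw quantized values, subtract the input zero point, rescale, clamp to the activation range (for RELU the lower bound is the output zero point), then narrow the dtype. A plain-numpy sketch of that arithmetic, with made-up scales and zero points and a hypothetical helper name; this is not what _tfr_quant_rescale literally does.

      import numpy as np

      def requantize_relu(q_in, s_in, z_in, s_out, z_out, qmax=127):
        real = (q_in.astype(np.int32) - z_in) * s_in        # dequantize
        q_out = np.round(real / s_out) + z_out              # requantize
        return np.clip(q_out, z_out, qmax).astype(np.int8)  # fused RELU clamp

      q_in = np.array([-128, -5, 0, 40, 127], dtype=np.int8)
      print(requantize_relu(q_in, s_in=0.05, z_in=-10, s_out=0.1, z_out=-128))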
  4. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir

      %5 = "quantfork.stats"(%4) {layerStats = dense<[-56.2916565, 122.922478]> : tensor<2xf32>} : (tensor<1x4xf32>) -> tensor<1x4xf32>
      %6 = "tfl.svdf"(%0, %1, %2, %3, %5) {fused_activation_function = "RELU", rank = 1 : i32} : (tensor<1x3xf32>, tensor<2x3xf32>, tensor<2x1xf32>, tensor<2xf32>, tensor<1x4xf32>) -> tensor<1x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 52.6K bytes
    - Viewed (0)
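    The quantfork.stats op above records the observed activation range for the SVDF output. As a sketch of how such a range would map to asymmetric int8 parameters (the pass's exact rounding and nudging may differ):

      rmin, rmax = -56.2916565, 122.922478   # layerStats from the test
      qmin, qmax = -128, 127
      scale = (rmax - rmin) / (qmax - qmin)
      zero_point = int(round(qmin - rmin / scale))
      print(scale, zero_point)  # ~0.7028, -48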
  5. src/cmd/vendor/golang.org/x/arch/x86/x86asm/decode.go

    	xArgRM64         // arg r/m64
    	xArgRM8          // arg r/m8
    	xArgReg          // arg reg
    	xArgRegM16       // arg reg/m16
    	xArgRegM32       // arg reg/m32
    	xArgRegM8        // arg reg/m8
    	xArgRel16        // arg rel16
    	xArgRel32        // arg rel32
    	xArgRel8         // arg rel8
    	xArgSS           // arg SS
    	xArgST           // arg ST, aka ST(0)
    	xArgSTi          // arg ST(i) with +i in opcode
    	xArgSreg         // arg Sreg
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Feb 10 18:59:52 UTC 2023
    - 45.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/tests/tf-ops.mlir

        "tf.Yield"(%t0, %t1, %t2) : (tensor<2xf32>, tensor<2xf32>, tensor<2xf32>) -> ()
        }, {
         %e0 = "tf.Neg"(%arg1) : (tensor<2xf32>) -> tensor<2xf32>
         %e1 = "tf.Relu"(%arg1) : (tensor<2xf32>) -> tensor<2xf32>
         %e2 = "tf.Sin"(%arg1) : (tensor<2xf32>) -> tensor<2xf32>
         "tf.Yield"(%e0, %e1, %e2) : (tensor<2xf32>, tensor<2xf32>, tensor<2xf32>) -> ()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 23 14:40:35 UTC 2023
    - 236.4K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc

    //===----------------------------------------------------------------------===//
    
    OpFoldResult LeakyReluOp::fold(FoldAdaptor adaptor) {
      auto operands = adaptor.getOperands();
      assert(operands.size() == 1 && "leaky relu has one operand");
    
      // leaky_relu(x, alpha: 1) -> x
      if (getAlpha().convertToFloat() == 1.0f &&
          getOperand().getType() == getType())
        return getOperand();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 146.7K bytes
    - Viewed (0)
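    The fold above relies on the identity leaky_relu(x, alpha=1) = max(x, 0) + 1 * min(x, 0) = x, so the op can be replaced by its operand when the types match. A quick numpy check of that identity:

      import numpy as np

      def leaky_relu(x, alpha):
        return np.where(x >= 0, x, alpha * x)

      x = np.linspace(-3.0, 3.0, 7, dtype=np.float32)
      assert np.array_equal(leaky_relu(x, alpha=1.0), x)  # alpha == 1 is the identity
      print(leaky_relu(x, alpha=0.2))                     # alpha != 1 scales negatives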
  8. RELEASE.md

        to matrix multiplication and convolution, these building blocks include:
        * Direct batched convolution
        * Pooling: maximum, minimum, average
        * Normalization: LRN, batch normalization
        * Activation: rectified linear unit (ReLU)
        * Data manipulation: multi-dimensional transposition (conversion),
          split, concat, sum and scale.
    
    *   TensorForest Estimator now supports SavedModel export for serving.
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 730.3K bytes
    - Viewed (0)
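    The release note above lists the rectified linear unit among the supported activation building blocks. For reference against the search term: relu(x) = max(x, 0), while relu6(x) = min(max(x, 0), 6) additionally caps activations at 6, as this small numpy example shows.

      import numpy as np

      x = np.array([-2.0, 0.0, 3.0, 8.0], dtype=np.float32)
      print(np.maximum(x, 0.0))                   # relu:  [0. 0. 3. 8.]
      print(np.minimum(np.maximum(x, 0.0), 6.0))  # relu6: [0. 0. 3. 6.]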