Results 41 - 50 of 130 for Bias (0.17 sec)

  1. tensorflow/compiler/mlir/lite/utils/lstm_utils.cc

      SetWeightForRecurrentToForgetGate();
      SetWeightForRecurrentToOutputGate();
    
      // Extract bias to cifg gates via slicing the bias tensor
      SetBiasToCellGate();
      SetBiasToInputGate();
      SetBiasToForgetGate();
      SetBiasToOutputGate();
    
      // Extract projection and set an empty projection bias
      SetProjection();
      SetProjectionBias();
    
      // Set the variable tensors
      SetInputActivationState();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 36.2K bytes
    - Viewed (0)
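The lstm_utils.cc snippet above slices a fused LSTM bias tensor into per-gate biases before assigning them to the CIFG gates. A minimal Go sketch of that slicing, assuming the fused bias is the concatenation of four per-gate vectors of length n_cell in input/forget/cell/output order (both the gate order and the helper name sliceFusedBias are assumptions for illustration, not taken from lstm_utils.cc):

    package main

    import "fmt"

    // sliceFusedBias splits a fused LSTM bias of length 4*nCell into four
    // per-gate slices. The gate order used here (input, forget, cell, output)
    // is an assumption; the real layout depends on the frontend that produced
    // the fused op.
    func sliceFusedBias(fused []float32, nCell int) (input, forget, cell, output []float32) {
        input = fused[0*nCell : 1*nCell]
        forget = fused[1*nCell : 2*nCell]
        cell = fused[2*nCell : 3*nCell]
        output = fused[3*nCell : 4*nCell]
        return
    }

    func main() {
        fused := []float32{1, 2, 3, 4, 5, 6, 7, 8} // nCell = 2
        i, f, c, o := sliceFusedBias(fused, 2)
        fmt.Println(i, f, c, o) // [1 2] [3 4] [5 6] [7 8]
    }
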
  2. tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc

      // TensorFlow Conv3D has no bias; optimization patterns that fuse Conv3D
      // with other ops can fill in the bias.
      Value none = rewriter.create<TFL::NoValueOp>(
          op->getLoc(), rewriter.getNoneType(), rewriter.getUnitAttr());
    
      rewriter.replaceOpWithNewOp<TFL::Conv3DOp>(
          op, tf_op.getType(), tf_op.getInput(), tf_op.getFilter(),
          /*bias=*/none, dilation_depth_factor, dilation_height_factor,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 20:06:54 UTC 2024
    - 45.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

    //                         PatternRewriter &rewriter, Location loc,
    //                         Type result_type, Value input,
    //                         Value filter, Value bias) const;
    //
    // And also the following method for getting the dimension for bias tensor:
    //
    //  int64_t getBiasDim(ArrayRef<int64_t> filterShape) const;
    template <typename ConcreteType, typename TFConvOpType>
    class ConvertTFConvOp : public RewritePattern {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
    - Viewed (0)
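The prepare_tf.cc comment above requires a getBiasDim hook that derives the bias length from the filter shape. A minimal sketch of the arithmetic such a hook performs, assuming TensorFlow's usual filter layouts, [height, width, in, out] for Conv2D and [height, width, in, channel_multiplier] for depthwise convolution; the function names here are illustrative, not the actual implementation:

    package main

    import "fmt"

    // conv2DBiasDim returns the bias length for a Conv2D filter laid out as
    // [height, width, in_channels, out_channels]: one bias per output channel.
    func conv2DBiasDim(filterShape []int64) int64 {
        return filterShape[3]
    }

    // depthwiseBiasDim returns the bias length for a DepthwiseConv2D filter
    // laid out as [height, width, in_channels, channel_multiplier].
    func depthwiseBiasDim(filterShape []int64) int64 {
        return filterShape[2] * filterShape[3]
    }

    func main() {
        fmt.Println(conv2DBiasDim([]int64{3, 3, 3, 32}))   // 32
        fmt.Println(depthwiseBiasDim([]int64{3, 3, 8, 4})) // 32
    }
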
  4. tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/convert_tf_quant_ops_to_mhlo.mlir

      // tensor_proto that points to dense<127> of type !tf_type.qint32.
      // CHECK-DAG: %[[RHS:.*]] = mhlo.constant() <{value = dense<127> : tensor<2xi32>}> : () -> tensor<2x!quant.uniform<i32:f32, 2.000000e+00:4>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 7.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.td

    def OptimizeIntGraph : Pass<"optimize-int-graph", "mlir::func::FuncOp"> {
      let summary = "Optimization patterns for quantized integer graph";
    
      let description = [{
        This includes patterns for merging addition of zp offset and bias.
      }];
    
      let constructor = "mlir::quant::stablehlo::CreateOptimizeIntGraphPass()";
      let dependentDialects = ["mhlo::MhloDialect"];
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Feb 23 01:41:18 UTC 2024
    - 2.7K bytes
    - Viewed (0)
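The pass description above mentions merging the addition of the zero-point offset and the bias. In an integer-quantized matmul or convolution the input zero point contributes a per-output-channel constant, so it can be folded into the bias once instead of being added at runtime. A minimal sketch of that folding, assuming symmetric weight quantization (weight zero point 0); the names are illustrative, not the pass's API:

    package main

    import "fmt"

    // foldZpOffsetIntoBias folds the input-zero-point offset of an integer
    // matmul into the bias. With symmetric weights, the accumulator is
    //   sum_k (x[k] - zpX) * w[k][j] = sum_k x[k]*w[k][j] - zpX * sum_k w[k][j],
    // so the constant term -zpX * sum_k w[k][j] can be merged into bias[j].
    func foldZpOffsetIntoBias(bias []int32, weights [][]int8, zpX int32) []int32 {
        merged := make([]int32, len(bias))
        for j := range bias {
            var colSum int32
            for k := range weights {
                colSum += int32(weights[k][j])
            }
            merged[j] = bias[j] - zpX*colSum
        }
        return merged
    }

    func main() {
        w := [][]int8{{1, 2}, {3, 4}} // indexed as [k][j]
        fmt.Println(foldZpOffsetIntoBias([]int32{10, 20}, w, 5)) // [-10 -10]
    }
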
  6. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

      %bias = arith.constant dense<1.0> : tensor<32xf32>
      %input = "tfl.dequantize"(%arg0) : (tensor<1x224x224x3x!quant.uniform<u8:f32, 1.5>>) -> tensor<1x224x224x3xf32>
      %weight = "tfl.dequantize"(%arg1) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32:3, {1.0,2.0,3.0}>>) -> tensor<32x3x3x3xf32>
      %conv = "tfl.conv_2d"(%input, %weight, %bias) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      // CHECK-DAG: %[[WEIGHTS:.*]] = arith.constant dense<1.000000e+00> : tensor<32x4x4x128xf32>
      // CHECK-DAG: %[[BIAS:.*]] = arith.constant dense<1.500000e+00> : tensor<32xf32>
      // CHECK: %[[RESULT:.*]] = "tfl.transpose_conv"(%[[SHAPE]], %[[WEIGHTS]], %arg0, %[[BIAS]])
      // CHECK: return %[[RESULT]]
    }
    
    // CHECK-LABEL: fuseMulIntoTransposeConv
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
    - Viewed (0)
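The CHECK-LABEL above, fuseMulIntoTransposeConv, names a pattern that folds a following elementwise multiply into tfl.transpose_conv: multiplying the convolution output by a per-output-channel constant is the same as pre-scaling the filter and bias along the output-channel dimension (the leading dimension in the 32x4x4x128 filter shown). A minimal sketch of the algebra on plain slices, not the MLIR pattern itself:

    package main

    import "fmt"

    // fuseMul scales filter and bias by a per-output-channel multiplier, so
    // that conv(filter, bias) followed by "* scale[o]" becomes a single conv.
    // The filter is indexed as [outChannel][flattened spatial*input].
    func fuseMul(filter [][]float32, bias, scale []float32) ([][]float32, []float32) {
        newFilter := make([][]float32, len(filter))
        newBias := make([]float32, len(bias))
        for o := range filter {
            newFilter[o] = make([]float32, len(filter[o]))
            for i, v := range filter[o] {
                newFilter[o][i] = v * scale[o]
            }
            newBias[o] = bias[o] * scale[o]
        }
        return newFilter, newBias
    }

    func main() {
        f, b := fuseMul([][]float32{{1, 1}, {2, 2}}, []float32{1, 1}, []float32{1.5, 0.5})
        fmt.Println(f, b) // [[1.5 1.5] [1 1]] [1.5 0.5]
    }
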
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir

    // PerTensor-DAG: %[[scale:.*]] = "tf.Const"() <{value = dense<0.0236220472> : tensor<f32>}> : () -> tensor<f32>
    // PerTensor-DAG: %[[zp:.*]] = "tf.Const"() <{value = dense<0> : tensor<i32>}> : () -> tensor<i32>
    // PerTensor-DAG: %[[bias:.*]] = "tf.Const"() <{value = dense<0.000000e+00> : tensor<3xf32>}>
    // PerTensor: %[[out_1:.*]] = "tf.PartitionedCall"(%arg0, %[[q_w1]], %[[scale]], %[[zp]]) <{config = "", config_proto = "", executor_type = "",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.3K bytes
    - Viewed (0)
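The PerTensor checks above pin a single scale constant (0.0236220472) and a zero point of 0 for weight-only quantization. For symmetric per-tensor i8 quantization the scale is typically max|w| / 127, which keeps the zero point at 0; a maximum absolute weight of 3.0 would give 3.0/127 ≈ 0.0236220472, consistent with the constant in the test. A minimal sketch of that computation (an illustration, not the pass implementation):

    package main

    import (
        "fmt"
        "math"
    )

    // perTensorSymmetricScale returns the i8 scale for symmetric, per-tensor
    // weight-only quantization: max|w| / 127, with a zero point of 0.
    func perTensorSymmetricScale(weights []float32) float32 {
        var maxAbs float64
        for _, w := range weights {
            maxAbs = math.Max(maxAbs, math.Abs(float64(w)))
        }
        return float32(maxAbs / 127)
    }

    func main() {
        fmt.Println(perTensorSymmetricScale([]float32{-3.0, 0.5, 1.25})) // 3.0/127 ≈ 0.0236220472
    }
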
  9. src/cmd/vendor/github.com/ianlancetaylor/demangle/rust.go

    			}
    
    			i += int(digit) * w
    			if i < 0 {
    				rst.fail("punycode number overflow")
    			}
    
    			var t int
    			if k <= bias {
    				t = tmin
    			} else if k > bias+tmax {
    				t = tmax
    			} else {
    				t = k - bias
    			}
    
    			if int(digit) < t {
    				break
    			}
    
    			if w >= math.MaxInt32/base {
    				rst.fail("punycode number overflow")
    			}
    			w *= base - t
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Aug 15 16:39:48 UTC 2023
    - 23.3K bytes
    - Viewed (0)
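The rust.go excerpt above is part of the punycode digit-decoding loop: the threshold t is derived from the position k and the running bias and kept within [tmin, tmax] (RFC 3492 uses base = 36, tmin = 1, tmax = 26). The three-way branch is effectively a clamp; a minimal equivalent sketch:

    package main

    import "fmt"

    const (
        base = 36
        tmin = 1
        tmax = 26
    )

    // threshold mirrors the branch in the excerpt: it clamps k-bias to the
    // range [tmin, tmax] (with tmin = 1, the excerpt's first branch, k <= bias,
    // is the same as k-bias < tmin).
    func threshold(k, bias int) int {
        t := k - bias
        if t < tmin {
            return tmin
        }
        if t > tmax {
            return tmax
        }
        return t
    }

    func main() {
        fmt.Println(threshold(36, 72), threshold(100, 72), threshold(96, 72)) // 1 26 24
    }
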
  10. src/math/big/float.go

    			ebits = fbits - mbits - 1 //     8  exponent size
    			bias  = 1<<(ebits-1) - 1  //   127  exponent bias
    			dmin  = 1 - bias - mbits  //  -149  smallest unbiased exponent (denormal)
    			emin  = 1 - bias          //  -126  smallest unbiased exponent (normal)
    			emax  = bias              //   127  largest unbiased exponent (normal)
    		)
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Jun 06 15:46:54 UTC 2024
    - 44.5K bytes
    - Viewed (0)
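The float.go excerpt derives the IEEE 754 exponent parameters from the total width fbits and the mantissa width mbits: for float32 (fbits = 32, mbits = 23) this yields 8 exponent bits, a bias of 127, and exponent bounds -149 / -126 / 127, exactly as the inline comments note. A small sketch that reproduces the same arithmetic for float32 and float64:

    package main

    import "fmt"

    // expoParams mirrors the constant arithmetic in float.go: given the total
    // bit width and mantissa width of an IEEE 754 format, derive the exponent
    // size, bias, and the smallest/largest unbiased exponents.
    func expoParams(fbits, mbits int) (ebits, bias, dmin, emin, emax int) {
        ebits = fbits - mbits - 1 // exponent size (one bit goes to the sign)
        bias = 1<<(ebits-1) - 1   // exponent bias
        dmin = 1 - bias - mbits   // smallest unbiased exponent (denormal)
        emin = 1 - bias           // smallest unbiased exponent (normal)
        emax = bias               // largest unbiased exponent (normal)
        return
    }

    func main() {
        fmt.Println(expoParams(32, 23)) // 8 127 -149 -126 127
        fmt.Println(expoParams(64, 52)) // 11 1023 -1074 -1022 1023
    }
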