Results 31 - 40 of 106 for Bias (0.04 sec)

  1. tensorflow/compiler/mlir/lite/utils/lstm_utils.cc

      SetWeightForRecurrentToForgetGate();
      SetWeightForRecurrentToOutputGate();
    
      // Extract bias to cifg gates via slicing the bias tensor
      SetBiasToCellGate();
      SetBiasToInputGate();
      SetBiasToForgetGate();
      SetBiasToOutputGate();
    
      // Extract projection and set an empty projection bias
      SetProjection();
      SetProjectionBias();
    
      // Set the variable tensors
      SetInputActivationState();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 36.2K bytes
    - Viewed (0)
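    The SetBiasTo*Gate helpers above slice one fused bias tensor into per-gate bias
    vectors. A minimal numpy sketch of that idea (the [input, cell, forget, output]
    gate order and the shapes are assumptions for illustration, not taken from
    lstm_utils.cc):

        import numpy as np

        # Hypothetical fused bias of shape [4 * n_cell]; the gate order below is
        # an assumption for illustration only.
        n_cell = 8
        fused_bias = np.arange(4 * n_cell, dtype=np.float32)

        bias_to_input_gate  = fused_bias[0 * n_cell : 1 * n_cell]
        bias_to_cell_gate   = fused_bias[1 * n_cell : 2 * n_cell]
        bias_to_forget_gate = fused_bias[2 * n_cell : 3 * n_cell]
        bias_to_output_gate = fused_bias[3 * n_cell : 4 * n_cell]

        assert bias_to_output_gate.shape == (n_cell,)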
  2. tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc

      // TensorFlow Conv3D has no bias; optimization patterns that fuse Conv3D
      // with other ops can fill in the bias.
      Value none = rewriter.create<TFL::NoValueOp>(
          op->getLoc(), rewriter.getNoneType(), rewriter.getUnitAttr());
    
      rewriter.replaceOpWithNewOp<TFL::Conv3DOp>(
          op, tf_op.getType(), tf_op.getInput(), tf_op.getFilter(),
          /*bias=*/none, dilation_depth_factor, dilation_height_factor,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 20:06:54 UTC 2024
    - 45.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

    //                         PatternRewriter &rewriter, Location loc,
    //                         Type result_type, Value input,
    //                         Value filter, Value bias) const;
    //
    // And also the following method for getting the dimension of the bias tensor:
    //
    //  int64_t getBiasDim(ArrayRef<int64_t> filterShape) const;
    template <typename ConcreteType, typename TFConvOpType>
    class ConvertTFConvOp : public RewritePattern {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/convert_tf_quant_ops_to_mhlo.mlir

      // tensor_proto that points to dense<127> of type !tf_type.qint32.
      // CHECK-DAG: %[[RHS:.*]] = mhlo.constant() <{value = dense<127> : tensor<2xi32>}> : () -> tensor<2x!quant.uniform<i32:f32, 2.000000e+00:4>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 7.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.td

    def OptimizeIntGraph : Pass<"optimize-int-graph", "mlir::func::FuncOp"> {
      let summary = "Optimization patterns for quantized integer graph";
    
      let description = [{
        This includes patterns for merging the addition of the zero-point (zp) offset and the bias.
      }];
    
      let constructor = "mlir::quant::stablehlo::CreateOptimizeIntGraphPass()";
      let dependentDialects = ["mhlo::MhloDialect"];
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Feb 23 01:41:18 UTC 2024
    - 2.7K bytes
    - Viewed (0)
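    The pass description refers to merging the addition of the zero-point (zp)
    offset and the bias. A rough numeric sketch of why that merge is valid for an
    integer matmul with an input zero point (illustrative only, not the pass's
    actual rewrite patterns):

        import numpy as np

        rng = np.random.default_rng(0)
        q_x = rng.integers(-128, 128, size=(2, 4)).astype(np.int32)   # quantized input
        q_w = rng.integers(-128, 128, size=(4, 3)).astype(np.int32)   # quantized weights
        bias = rng.integers(-1000, 1000, size=(3,)).astype(np.int32)
        zp_x = 5                                                      # input zero point

        # Direct form: subtract the zero point, then add the bias.
        direct = (q_x - zp_x) @ q_w + bias

        # Merged form: the zp offset term is a per-output-channel constant,
        # so it can be folded into the bias ahead of time.
        merged_bias = bias - zp_x * q_w.sum(axis=0)
        merged = q_x @ q_w + merged_bias

        assert np.array_equal(direct, merged)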
  6. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

      %bias = arith.constant dense<1.0> : tensor<32xf32>
      %input = "tfl.dequantize"(%arg0) : (tensor<1x224x224x3x!quant.uniform<u8:f32, 1.5>>) -> tensor<1x224x224x3xf32>
      %weight = "tfl.dequantize"(%arg1) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32:3, {1.0,2.0,3.0}>>) -> tensor<32x3x3x3xf32>
      %conv = "tfl.conv_2d"(%input, %weight, %bias) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      // CHECK-DAG: %[[WEIGHTS:.*]] = arith.constant dense<1.000000e+00> : tensor<32x4x4x128xf32>
      // CHECK-DAG: %[[BIAS:.*]] = arith.constant dense<1.500000e+00> : tensor<32xf32>
      // CHECK: %[[RESULT:.*]] = "tfl.transpose_conv"(%[[SHAPE]], %[[WEIGHTS]], %arg0, %[[BIAS]])
      // CHECK: return %[[RESULT]]
    }
    
    // CHECK-LABEL: fuseMulIntoTransposeConv
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
    - Viewed (0)
  8. src/math/big/float.go

    			ebits = fbits - mbits - 1 //     8  exponent size
    			bias  = 1<<(ebits-1) - 1  //   127  exponent bias
    			dmin  = 1 - bias - mbits  //  -149  smallest unbiased exponent (denormal)
    			emin  = 1 - bias          //  -126  smallest unbiased exponent (normal)
    			emax  = bias              //   127  largest unbiased exponent (normal)
    		)
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Jun 06 15:46:54 UTC 2024
    - 44.5K bytes
    - Viewed (0)
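    The constants in this float.go excerpt follow directly from the IEEE-754
    layout. A quick arithmetic check (in Python, using binary32's fbits = 32 and
    mbits = 23 as the example) reproduces the values in the comments:

        # IEEE-754 binary32: 32 bits total, 23 stored mantissa bits.
        fbits, mbits = 32, 23

        ebits = fbits - mbits - 1        #    8  exponent size
        bias  = (1 << (ebits - 1)) - 1   #  127  exponent bias
        dmin  = 1 - bias - mbits         # -149  smallest unbiased exponent (denormal)
        emin  = 1 - bias                 # -126  smallest unbiased exponent (normal)
        emax  = bias                     #  127  largest unbiased exponent (normal)

        assert (ebits, bias, dmin, emin, emax) == (8, 127, -149, -126, 127)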
  9. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

        output = input / (bias + alpha * sqr_sum) ** beta
    
    For details, see [Krizhevsky et al., ImageNet classification with deep
    convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
      }];
    
      let arguments = (ins
          TFL_FpTensor:$input,
          I32Attr:$radius,
          F32Attr:$bias,
          F32Attr:$alpha,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
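    The op description gives output = input / (bias + alpha * sqr_sum) ** beta,
    where sqr_sum is the sum of squares over a window of `radius` neighbors on each
    side along the depth axis. A small numpy sketch of that formula as written
    (1-D over depth only, ignoring batch and spatial dimensions; illustrative, not
    TFLite's kernel):

        import numpy as np

        def lrn_1d(x, radius=2, bias=1.0, alpha=1e-4, beta=0.75):
            """Local response normalization over the depth axis of a 1-D vector."""
            out = np.empty_like(x)
            for d in range(len(x)):
                lo, hi = max(0, d - radius), min(len(x), d + radius + 1)
                sqr_sum = np.sum(x[lo:hi] ** 2)
                out[d] = x[d] / (bias + alpha * sqr_sum) ** beta
            return out

        x = np.array([1.0, 2.0, 3.0, 4.0])
        print(lrn_1d(x))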
  10. tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_without_identity.pbtxt

    # MLIR:         %[[bias:.*]] = "tfl.pseudo_qconst"() <{qtype = tensor<186x!quant.uniform<i32:f32:0
    # MLIR:         %[[weight:.*]] = "tfl.pseudo_qconst"() <{qtype = tensor<186x1x1x256x!quant.uniform<i8<-127:127>:f32:0, {0.12581039038230116,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.8K bytes
    - Viewed (0)