Results 1 - 10 of 130 for Bias (0.07 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/optimize.mlir

    func.func @convolution_add_add(
        %lhs: tensor<?x3x2x1xi8>, %rhs: tensor<2x1x1x1xi8>,
        %zp_offset: tensor<?x2x2x1xi32>, %bias: tensor<1xi32>
      ) -> tensor<?x2x2x1xi32> {
      // CHECK-DAG: %[[conv:.*]] = mhlo.convolution
      // CHECK-DAG: %[[combined:.*]] = chlo.broadcast_add %[[zp_offset:.*]], %[[bias:.*]]
      // CHECK-DAG: %[[result:.*]] = chlo.broadcast_add %[[conv]], %[[combined]]
      // CHECK: return %[[result]]
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Feb 24 02:26:47 UTC 2024
    - 10.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc

              mlir::cast<quant::QuantizedType>(non_bias_type.getElementType());
          non_bias_types.push_back(non_bias_ele_type);
        } else {
          // The non-bias hasn't been quantized, let's skip this bias.
          break;
        }
      }
      // The non-bias hasn't been quantized, let's skip this bias.
      if (non_bias_types.size() != non_biases.size()) return {};
    
      return func(/*op_types=*/non_bias_types, /*adjusted_quant_dim=*/-1,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/passes/lift_quantizable_spots_as_functions_fusion.td

      [(IsNotInLiftedFunc $res), (IsStableHLOConstantOp $bias)], [], (addBenefit 5)>;
    
    def LiftDotGeneralWithBias : Pat<
      (StableHLO_AddOp:$res
        (StableHLO_DotGeneralOp
            $lhs, $rhs, $dot_dimension_numbers, $precision_config),
        (StableHLO_BroadcastInDimOp $bias, $dims)),
      (LiftAsTFXlaCallModule<"composite_dot_general_with_bias_fn">
        (ArgumentList $lhs, $rhs, $bias),
        (ResultList $res),
        (NamedAttributeList
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 04 07:19:09 UTC 2024
    - 23.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/optimize.mlir

    // CHECK-NEXT: %[[conv:.*]] = "tf.Conv2D"(%arg0, %[[cst]])
    // CHECK-NEXT: %[[bias:.*]] = "tf.AddV2"(%[[conv]], %[[cst_0]])
    // CHECK-NEXT: return %[[bias]] : tensor<256x8x7x16xf32>
    }
    
    // CHECK-LABEL: convaddv2mul
    func.func @convaddv2mul(%arg: tensor<256x32x32x3xf32>) -> tensor<256x8x7x16xf32> {
      %filter = arith.constant dense<2.0> : tensor<3x3x3x16xf32>
      %bias = arith.constant dense<3.0> : tensor<16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 3.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.td

    // Pattern rules for lifting ops with bias as functions
    //===----------------------------------------------------------------------===//
    
    def LiftDepthwiseConv2dNativeWithBias : Pat<
      (TF_BiasAddOp:$res
        (TF_DepthwiseConv2dNativeOp $input, $filter, $strides, $padding,
          $explicit_paddings, IsDataFormatNHWC:$data_format, $dilations),
        $bias, IsDataFormatNHWC:$bias_data_format),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 15.6K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.td

      ),
      [(AreLastTwoDimsTransposed $perm_value), (IsNoneType $bias)]>;
    
    // Fuses TFL_FullyConnectedOp and TFL_TransposeOp Rhs to TFL_BatchMatMulOp
    def FuseTransposeFCRhsToBatchMatmul : Pat<
      (TFL_FullyConnectedOp
        2DTensorOf<[F32]>:$lhs,
        (TFL_TransposeOp TensorOf<[F32]>:$rhs, (Arith_ConstantOp:$perm_value $p0)),
        $bias, $TFL_AF_None, $TFL_FCWO_Default,
        $keep_num_dims, $asymmetric_quantize_inputs
        ),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 09 23:44:09 UTC 2023
    - 2.6K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/transforms/optimize.cc

            auto attr = rewriter.getZeroAttr(type);
            bias = rewriter.create<arith::ConstantOp>(add_op.getLoc(), type, attr);
            auto none_af = rewriter.getStringAttr("NONE");
    
            bias =
                rewriter.create<AddOp>(add_op.getLoc(), bias, constant_val, none_af)
                    .getOutput();
          } else {
            // If there is no pre-existing bias and the `constant_val` is 1D, simply
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
    - Viewed (0)
  8. okhttp/src/main/kotlin/okhttp3/internal/idn/Punycode.kt

                  when {
                    k <= bias -> TMIN
                    k >= bias + TMAX -> TMAX
                    else -> k - bias
                  }
                if (q < t) break
                result.writeByte((t + ((q - t) % (BASE - t))).punycodeDigit)
                q = (q - t) / (BASE - t)
              }
    
              result.writeByte(q.punycodeDigit)
              bias = adapt(delta, h + 1, h == b)
              delta = 0
    Registered: Sun Jun 16 04:42:17 UTC 2024
    - Last Modified: Wed Apr 03 03:04:50 UTC 2024
    - 8.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td

        (TF_BiasAddOp:$bias_add
          $conv_out,
          (TF_ConstOp:$bias IsFloatElementsAttr:$bias_value), $data_format),
        (TF_ConstOp:$add_rhs IsFloatElementsAttr:$add_rhs_value)),
      (TF_BiasAddOp
        $conv_out, (TF_AddV2Op $bias, (ReshapeTo1DTensor $add_rhs)), $data_format),
      [(HasOneUse $bias_add),
       (ReshapableTo1DTensor $add_rhs),
       (HasEqualElementSize<[-1], [-1]> $bias, $add_rhs)]>;
    
    // Fuse AffineOp followed by an MulOp patterns.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 8.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_traits.h

          : public QuantizationSpecTraitBase<
                ConcreteType, AccumulatorUniformScale<Bias, Operands...>::Impl> {
       public:
        // Whether the index-th operand is a bias.
        static bool IsBias(int index) { return index == Bias; }
    
        // Returns the indexes of all the non-bias operands.
        static std::vector<int> GetAllNonBiasOperands() {
          return std::vector<int>({Operands...});
        }
      };
    };
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 5.8K bytes
    - Viewed (0)
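The bias in result 8 is the Punycode bias from RFC 3492: after each encoded code point, `adapt(delta, h + 1, h == b)` rescales the threshold window used to pick digits between TMIN and TMAX. Below is a minimal, self-contained Kotlin sketch of that adaptation step, written against the RFC's published constants (BASE, TMIN, TMAX, SKEW, DAMP); it illustrates the algorithm only and is not copied from OkHttp's Punycode.kt, whose declarations may differ.

    // RFC 3492, section 6.1: bias adaptation. Constant values are the RFC's;
    // OkHttp's Punycode.kt may declare or scope them differently.
    private const val BASE = 36
    private const val TMIN = 1
    private const val TMAX = 26
    private const val SKEW = 38
    private const val DAMP = 700

    fun adapt(delta: Int, numPoints: Int, firstTime: Boolean): Int {
      // Scale delta down, more aggressively on the very first adaptation.
      var d = if (firstTime) delta / DAMP else delta / 2
      d += d / numPoints
      var k = 0
      // Strip full-range digits until delta falls inside the threshold window.
      while (d > ((BASE - TMIN) * TMAX) / 2) {
        d /= BASE - TMIN
        k += BASE
      }
      return k + (((BASE - TMIN + 1) * d) / (d + SKEW))
    }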