Results 21 - 30 of 106 for Bias (0.04 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

            Args:
              weight_shape: Shape of the weight tensor.
              bias_size: If None, do not use bias. Otherwise, use the given
                value as the bias size.
              activation_fn: The activation function to be used. No activation
                function if None.
              use_biasadd: If True, use BiasAdd for adding bias, else use AddV2.
            """
            self.bias_size = bias_size
            self.activation_fn = activation_fn
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
    - Viewed (0)
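    A note on the snippet above: the docstring's `use_biasadd` flag selects which op adds the bias. A minimal TensorFlow sketch of the two paths (shapes and values here are illustrative, not from the test):

        import tensorflow as tf

        x = tf.random.normal([1, 4, 4, 2])   # NHWC activations
        bias = tf.constant([0.5, -0.5])      # one value per output channel

        # BiasAdd: a dedicated op that broadcasts the bias over the channel dim.
        y_biasadd = tf.nn.bias_add(x, bias)

        # AddV2: plain elementwise addition, relying on broadcasting rules.
        y_addv2 = tf.raw_ops.AddV2(x=x, y=bias)

        # Same values either way; only the recorded graph op differs.
        tf.debugging.assert_near(y_biasadd, y_addv2)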
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir

      %weight = arith.constant dense_resource<__elided__> : tensor<2x3x3x2xf32>
      %bias = arith.constant dense<[7.11401462, 7.05456924]> : tensor<2xf32>
    
      %q_input= "quantfork.qcast"(%input) : (tensor<1x3x4x3xf32>) -> tensor<1x3x4x3x!quant.uniform<i8:f32, 0.58810077742034317:-128>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 6.4K bytes
    - Viewed (0)
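    A note on the snippet above: in the `!quant.uniform<i8:f32, scale:zero_point>` type, the scale and zero point define an affine map between float and int8. A hedged numpy sketch of that map, reusing the constants from the qcast:

        import numpy as np

        scale, zero_point = 0.58810077742034317, -128

        def quantize(x):
            # Affine quantization: q = round(x / scale) + zero_point, clamped to int8.
            q = np.round(x / scale) + zero_point
            return np.clip(q, -128, 127).astype(np.int8)

        def dequantize(q):
            return (q.astype(np.float32) - zero_point) * scale

        x = np.array([0.0, 1.0, 75.0], dtype=np.float32)
        print(dequantize(quantize(x)))  # reproduces x up to rounding error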
  3. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

    template <typename T>
    Operation* GetBroadcastedUserOp(Operation* op) {
      // Broadcast bias for known input shape.
      auto broadcast_in_dim_op = FindUserOfType<BroadcastInDimOp>(op);
      if (broadcast_in_dim_op != nullptr) {
        auto target_op = FindUserOfType<T>(broadcast_in_dim_op);
        if (target_op != nullptr) return target_op;
      }
      // Broadcast bias for unknown input shape.
      auto get_dimension_size_op = FindUserOfType<GetDimensionSizeOp>(op);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 06:04:36 UTC 2024
    - 41.7K bytes
    - Viewed (0)
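    A note on the snippet above: GetBroadcastedUserOp walks the op's users, first to a BroadcastInDimOp and then to a user of the requested type. A toy Python sketch of the same traversal over a hypothetical op graph (the Op class and the kind strings are illustrative, not MLIR API):

        from dataclasses import dataclass, field

        @dataclass
        class Op:
            kind: str
            users: list = field(default_factory=list)

        def find_user_of_type(op, kind):
            # Analogous to FindUserOfType<T>: first user of the given kind, or None.
            return next((u for u in op.users if u.kind == kind), None)

        def get_broadcasted_user_op(op, target_kind):
            # Broadcast bias for known input shape: bias -> broadcast_in_dim -> target.
            bcast = find_user_of_type(op, "broadcast_in_dim")
            if bcast is not None:
                target = find_user_of_type(bcast, target_kind)
                if target is not None:
                    return target
            # The original continues with the unknown-shape path via GetDimensionSizeOp.
            return None

        add = Op("add")
        bcast = Op("broadcast_in_dim", users=[add])
        bias = Op("constant", users=[bcast])
        print(get_broadcasted_user_op(bias, "add") is add)  # True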
  4. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

      func.return %fc : tensor<1x2xf32>
    // CHECK-DAG: %[[weight:.*]] = arith.constant dense<{{\[\[}}0.000000e+00, 1.000000e+00]
    // CHECK-DAG: %[[bias:.*]] = arith.constant dense<[0.000000e+00, 2147364.75]>
    // CHECK-DAG: %[[b_q:.*]] = "tfl.quantize"(%[[bias]]){{.*}}quant.uniform<i32:f32:0, {7.8740158861230386E-10,0.0019998892694710656}>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
    - Viewed (0)
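    A note on the snippet above: the bias is checked as i32 with per-channel scales; in the TFLite quantization scheme a bias scale is the product of the input scale and the per-channel weight scale, with zero point 0. The second bias value appears constructed to sit exactly at the INT32_MAX / 2 cap (see entry 9 below):

        # Scale and bias value copied from the CHECK lines above.
        bias_scale = 0.0019998892694710656
        bias_value = 2147364.75

        q = round(bias_value / bias_scale)
        print(q, (2**31 - 1) // 2)  # both 1073741823: the quantized bias hits the cap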
  5. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/optional_input.json

    // RUN: json_to_flatbuffer %p/test_schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck %s
    
    // This test checks that when the flatbuffer omits the last optional input `bias` of the tfl.conv_2d op, the flatbuffer_importer automatically adds a `none` value to tfl.conv_2d.
    
    // CHECK: %[[CST:.*]] = "tfl.no_value"() <{value}> : () -> none
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.8K bytes
    - Viewed (0)
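    A note on the snippet above: the test documents importer behavior, namely that a missing trailing optional operand is materialized as a `none` value rather than left absent. A hypothetical sketch of that padding step (all names here are illustrative):

        NONE_VALUE = "tfl.no_value"  # stand-in for the none-typed placeholder op

        def pad_optional_inputs(operands, expected_arity):
            # Fill omitted trailing optional inputs (e.g. conv_2d's bias) with a
            # none placeholder so the op always receives a full operand list.
            return operands + [NONE_VALUE] * (expected_arity - len(operands))

        print(pad_optional_inputs(["input", "filter"], 3))
        # ['input', 'filter', 'tfl.no_value']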
  6. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

          EXPECT_NE(float_tensor, nullptr);
          // If the tensor is a weight, it should have type INT8, otherwise it
          // should stay with type FLOAT32.
          // If the tensor is a bias, it should have type FLOAT32.
          //
          // Check with float_tensor name since quantized tensor
          // may be renamed.
          if (float_tensor->name()->str() == "conv_bias") {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
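    A note on the snippet above: the assertions encode the weight-only quantization contract, i.e. weights become INT8 while biases and other tensors stay FLOAT32. A minimal numpy sketch of symmetric int8 weight quantization under that contract (the exact scheme used by the pass is an assumption):

        import numpy as np

        def quantize_weight(w):
            # Symmetric per-tensor int8: scale so that max |w| maps to 127.
            scale = np.abs(w).max() / 127.0
            q = np.clip(np.round(w / scale), -128, 127).astype(np.int8)
            return q, scale

        weights = np.random.randn(3, 3).astype(np.float32)
        bias = np.zeros(3, dtype=np.float32)

        q_w, w_scale = quantize_weight(weights)
        print(q_w.dtype)   # int8: weights are quantized
        print(bias.dtype)  # float32: biases are left alone in weight-only mode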
  7. tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc

          result_type = bias_add.getResult().getType();
        }
    
        auto fused_loc = rewriter.getFusedLoc(locations);
    
        // The fused contraction has the same operands as the original contraction
        // with `bias` from the BiasAddOp appended.
        SmallVector<Value, 4> operands(contraction.operand_begin(),
                                       contraction.operand_end());
        operands.push_back(bias_add.getBias());
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.9K bytes
    - Viewed (0)
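    A note on the snippet above: the fused op's operand list is the contraction's operands with the BiasAdd's bias appended. A toy sketch of that rewrite on a dict-based graph (the node encoding is illustrative; TF's real fused ops are e.g. _FusedMatMul and _FusedConv2D):

        def fuse_bias_add(contraction, bias_add):
            # Keep the contraction's operands and append the bias operand,
            # mirroring the SmallVector construction in the snippet above.
            return {
                "op": "_Fused" + contraction["op"],
                "operands": contraction["operands"] + [bias_add["bias"]],
                "fused_ops": [contraction["op"], "BiasAdd"],
            }

        matmul = {"op": "MatMul", "operands": ["x", "w"]}
        bias_add = {"op": "BiasAdd", "bias": "b"}
        print(fuse_bias_add(matmul, bias_add))
        # {'op': '_FusedMatMul', 'operands': ['x', 'w', 'b'], ...}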
  8. tensorflow/c/experimental/gradients/nn_grad.cc

      Status Compute(AbstractContext* ctx,
                     absl::Span<AbstractTensorHandle* const> grad_outputs,
                     absl::Span<AbstractTensorHandle*> grad_inputs) override {
        /* Given upstream grad U and a BiasAdd: A + bias, the gradients are:
         *
         *    dA = U
         *    dbias = reduceSum(U, dims = channel_dim)
         */
    
        AbstractTensorHandle* upstream_grad = grad_outputs[0];
        DCHECK(upstream_grad);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 09 06:38:45 UTC 2024
    - 5.7K bytes
    - Viewed (0)
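    A note on the snippet above: the comment gives the BiasAdd gradients, i.e. the upstream grad U flows to A unchanged and the bias grad reduces U over every dimension except the channel. A numpy check of that identity (NHWC layout assumed, channel last):

        import numpy as np

        upstream = np.random.randn(2, 4, 4, 3)   # U: grad w.r.t. the BiasAdd output

        grad_a = upstream                        # dA = U
        grad_bias = upstream.sum(axis=(0, 1, 2)) # dbias = reduceSum over non-channel dims

        print(grad_bias.shape)  # (3,): one partial derivative per bias element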
  9. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc

      // Restrict maximum absolute value of bias within INT_MAX / 2, to make some
      // room for accumulator.
      if (auto bias_quantized_type = mlir::dyn_cast<UniformQuantizedType>(params);
          bias_quantized_type != nullptr) {
        double bias_half_range = 0.0f;
        for (auto bias : bias_values.getValues<APFloat>()) {
          if (bias_half_range < std::abs(bias.convertToFloat())) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 38.1K bytes
    - Viewed (0)
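    A note on the snippet above: the driver caps the quantized bias magnitude at INT_MAX / 2 so the int32 accumulator keeps headroom for the partial sums added on top of the bias. A sketch of the adjustment, assuming the scale is widened whenever the largest |bias| would exceed the cap:

        import numpy as np

        INT32_MAX = 2**31 - 1

        def clamp_bias_scale(bias_values, scale):
            # Restrict max |quantized bias| to INT32_MAX / 2, widening the
            # scale if the current one would overflow that bound.
            bias_half_range = np.abs(bias_values).max()
            if bias_half_range / scale > INT32_MAX // 2:
                scale = bias_half_range / (INT32_MAX // 2)
            return scale

        bias = np.array([0.0, 2147364.75])
        print(clamp_bias_scale(bias, 1e-12))  # widened to ~0.0019998893 so the bias fits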
  10. tensorflow/compiler/mlir/quantization/stablehlo/quantize_passes.cc

              .has_preset_quantization_method()) {
        quantization_options_ =
            mlir::quant::stablehlo::FillPresetQuantizationOptions(
                quantization_options);
      }
    
      // TODO: b/276999414 - Add activation and bias quantization component as
      // respective quantization passes are created.
      QuantizationComponentSpec weight_component;
      for (const auto& component : quantization_options_.quantization_method()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 08:32:43 UTC 2024
    - 2.3K bytes
    - Viewed (0)
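    A note on the snippet above: the pass reads per-component specs (weight, activation, bias) out of the quantization method, and the TODO notes that only the weight component is consumed so far. A hypothetical sketch of that selection (field names are illustrative, not the real proto schema):

        # Hypothetical stand-in for the quantization method's component specs.
        method = {
            "component_specs": [
                {"component": "WEIGHT", "bit_width": 8},
                {"component": "BIAS", "bit_width": 32},  # skipped today, per the TODO
            ]
        }

        # Mirror the loop in the snippet: pick out the weight component only.
        weight_component = next(
            (s for s in method["component_specs"] if s["component"] == "WEIGHT"), None
        )
        print(weight_component)  # {'component': 'WEIGHT', 'bit_width': 8}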