
Results 1 - 10 of 10 for bias_shape (0.2 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

        x_shape = [label_to_size.get(x_label) for x_label in x_labels]
        y_shape = [label_to_size.get(y_label) for y_label in y_labels]
        bias_shape = None
        if use_bias:
          bias_shape = [label_to_size.get(out_label) for out_label in out_labels]
          bias_shape = bias_shape[-1:]
        contracting_dims = set()
    
        x_signature = list(x_shape)
        y_signature = list(y_shape)
        if generate_unknown_shape_signature:
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
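
    The snippet above builds the bias shape from an einsum equation's output
    labels and then keeps only the last dimension. A minimal NumPy sketch of
    the same derivation (the helper name, equation, and sizes below are
    illustrative, not taken from the file):

      import numpy as np

      def einsum_bias_shape(equation: str, label_to_size: dict) -> list:
        # The bias applies along the last output dimension only, mirroring
        # bias_shape = bias_shape[-1:] in the snippet above.
        out_labels = equation.split('->')[1]
        bias_shape = [label_to_size[label] for label in out_labels]
        return bias_shape[-1:]

      label_to_size = {'a': 2, 'b': 3, 'c': 4}
      x = np.random.rand(2, 3)                   # labels 'ab'
      y = np.random.rand(3, 4)                   # labels 'bc'
      out = np.einsum('ab,bc->ac', x, y)
      bias = np.zeros(einsum_bias_shape('ab,bc->ac', label_to_size))
      print((out + bias).shape)                  # (2, 4); bias broadcasts over the last axis
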
  2. tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc

      SmallVector<int64_t, 1> bias_shape{2};
      SmallVector<int64_t, 2> projection_shape{1, 2};
      SmallVector<int64_t, 1> layer_norm_scale{4};
      SmallVector<int64_t, 2> output_shape{1, 2};
      auto input_type = RankedTensorType::get(input_shape, builder->getF32Type());
      auto weight_type = RankedTensorType::get(weight_shape, builder->getF32Type());
      auto bias_type = RankedTensorType::get(bias_shape, builder->getF32Type());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

        x_shape = [label_to_size.get(x_label) for x_label in x_labels]
        y_shape = [label_to_size.get(y_label) for y_label in y_labels]
        bias_shape = None
        if use_bias:
          bias_shape = [label_to_size.get(out_label) for out_label in out_labels]
          bias_shape = bias_shape[-1:]
        contracting_dims = set()
    
        x_signature = list(x_shape)
        y_signature = list(y_shape)
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

                num_bits=8,
                narrow_range=False,
            )
            return {'output': out}
    
        bias = None
        if has_bias:
          bias_shape = shapes[1][-1]
          if bias_shape is not None:
            bias = array_ops.constant(
                np.random.uniform(size=[shapes[1][-1]]), dtype=dtypes.float32
            )
        model = MatmulModel(bias)
        x = array_ops.constant(
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
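
    For context, the test above materializes a random bias whose length is the
    last dimension of the second matmul operand (shapes[1][-1]). A NumPy-only
    sketch of the same shape logic (the shapes here are made up):

      import numpy as np

      # shapes[1] is the second matmul operand; its last dimension is the
      # output feature count, so the bias is a 1-D vector of that length.
      shapes = [(5, 3), (3, 4)]
      bias = np.random.uniform(size=[shapes[1][-1]]).astype(np.float32)

      x = np.random.rand(*shapes[0]).astype(np.float32)
      w = np.random.rand(*shapes[1]).astype(np.float32)
      out = x @ w + bias                         # bias broadcasts across rows
      print(out.shape)                           # (5, 4)
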
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc

            filter_quantized_element_type.getZeroPoint());
      }
    
      SmallVector<int64_t, 1> bias_shape = {filter_shape[0]};
      auto bias_type =
          RankedTensorType::getChecked(loc, bias_shape, bias_quantized_type);
    
      auto bias_value_type = RankedTensorType::getChecked(
          loc, std::move(bias_shape), rewriter.getI32Type());
      auto bias_value = DenseIntElementsAttr::get(
    - Last Modified: Mon Apr 22 09:00:19 UTC 2024
    - 99.8K bytes
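
    The pass above synthesizes a zero bias with one element per output channel
    (filter_shape[0]) and stores it as i32. A rough NumPy analogue, assuming a
    filter layout with output channels in dimension 0 (the concrete shape is
    invented):

      import numpy as np

      # Layout assumption: dimension 0 of the filter is the output-channel
      # count, matching bias_shape = {filter_shape[0]} in the snippet above.
      filter_shape = (8, 3, 3, 3)                # (out_ch, in_ch, kh, kw)
      bias = np.zeros((filter_shape[0],), dtype=np.int32)  # quantized bias is i32
      print(bias.shape)                          # (8,)
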
  6. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

          self,
          equation: str,
      ):
        _, y_shape, bias_shape, x_signature, y_signature = (
            self._prepare_sample_einsum_datashapes(equation, use_bias=True)
        )
    
        model = self._create_einsum_model(
            self._input_saved_model_path,
            equation,
            y_shape,
            x_signature,
            y_signature,
            bias_shape,
        )
    
        # Generate model input data.
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
  7. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc

        Operation* op, const int bias_index,
        const ArrayRef<int> non_bias_operand_indices,
        const AccumulatorScaleFunc func) {
      QuantState& bias_state = GetOperandQuantState(op, bias_index);
      if (!bias_state.IsEmpty()) {
        return bias_state.params;
      }
      std::vector<QuantizedType> op_types{};
      op_types.reserve(non_bias_operand_indices.size());
    
      int adjusted_quant_dim = -1;
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 38.1K bytes
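
    GetBiasParams above derives the bias quantization parameters from the
    non-bias operands through an accumulator-scale function. A common
    convention in int8 schemes (a hedged sketch, not necessarily this file's
    exact logic) is bias_scale = input_scale * weight_scale with a zero point
    of 0, since int8 x int8 products accumulate into int32 at that combined
    scale:

      import numpy as np

      def accumulator_bias_scale(input_scale, weight_scales):
        # The int32 accumulator's scale is the product of the operand scales,
        # so the bias is quantized at input_scale * weight_scale (per channel).
        return input_scale * weight_scales

      input_scale = 0.05
      weight_scales = np.array([0.01, 0.02, 0.04])     # per-output-channel
      bias_scales = accumulator_bias_scale(input_scale, weight_scales)

      bias_float = np.array([0.7, -1.3, 2.1])
      bias_q = np.round(bias_float / bias_scales).astype(np.int32)
      print(bias_q * bias_scales)                      # ~recovers bias_float
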
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

        auto bcast_op_result_type =
            mlir::cast<RankedTensorType>(bcast_op_result.getType());
        const ArrayRef<int64_t> bcast_shape = bcast_op_result_type.getShape();
        const TensorType new_bcast_op_result_type = bcast_op_result_type.cloneWith(
            bcast_shape, accumulation_quantized_element_type);
        bcast_op_result.setType(new_bcast_op_result_type);
      }
    
      const auto add_op_result_type =
    - Last Modified: Fri May 03 06:04:36 UTC 2024
    - 41.7K bytes
  9. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc

    }
    
    std::optional<RankedTensorType> InferWindowOutputShape(
        const ShapedType& base_shape, const xla::Window& window,
        Type element_type) {
      if (window.dimensions_size() != base_shape.getRank()) {
        llvm::errs() << "Window has dimension " << window.dimensions_size()
                     << " but base shape has dimension " << base_shape.getRank()
                     << "\n";
        return std::nullopt;
      }
    
    - Last Modified: Sat Jun 08 07:28:49 UTC 2024
    - 134.1K bytes
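
    InferWindowOutputShape above first checks that the window's rank matches
    the base shape's rank. Per dimension, the output size follows the usual
    strided-window arithmetic; a Python sketch under that assumption (the
    parameter names are mine, not XLA's):

      def window_output_dim(base, window, stride,
                            pad_lo=0, pad_hi=0,
                            base_dilation=1, window_dilation=1):
        # Same formula convolutions use: dilate the base, pad it, then count
        # how many strided window placements fit.
        dilated_base = (base - 1) * base_dilation + 1 if base > 0 else 0
        effective_window = (window - 1) * window_dilation + 1
        padded = dilated_base + pad_lo + pad_hi
        if padded < effective_window:
          return 0
        return (padded - effective_window) // stride + 1

      print(window_output_dim(base=7, window=3, stride=2))                      # 3
      print(window_output_dim(base=7, window=3, stride=1, pad_lo=1, pad_hi=1))  # 7
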
  10. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

      for (size_t i = 0; i < out_channel_size; i++) {
        const float bias_scale = disable_per_channel_quantization_for_dense_
                                     ? bias_scales[0]
                                     : bias_scales[i];
        auto dequantized_value = bias_values[i] * bias_scale;
        EXPECT_THAT(dequantized_value,
                    FloatNear(bias_float_buffer[i], bias_scale / 2));
      }
    
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
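
    The loop above accepts a dequantized bias value if it is within half a
    quantization step (bias_scale / 2) of the float reference, which is the
    worst-case error of round-to-nearest quantization. A self-contained sketch
    with invented values:

      import numpy as np

      bias_float = np.array([0.73, -1.28, 2.05], dtype=np.float32)
      bias_scales = np.array([0.01, 0.02, 0.04], dtype=np.float32)  # per-channel

      bias_values = np.round(bias_float / bias_scales).astype(np.int64)
      dequantized = bias_values * bias_scales

      # Round-to-nearest error never exceeds half a step, so this holds.
      assert np.all(np.abs(dequantized - bias_float) <= bias_scales / 2)
      print(dequantized)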