Results 1 - 4 of 4 for out_shape (0.12 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

        auto out_count = rewriter.create<MulOp>(
            loc, tensorflow::GetTypeFromTFTensorShape({}, out_size_element_ty),
            out_y, out_x);
    
        // Generate what the final output shape will look like.
        auto out_shape = rewriter.create<PackOp>(
            loc, tensorflow::GetTypeFromTFTensorShape({4}, out_size_element_ty),
            ValueRange({batch, out_y, out_x, channels}));
    
        // Compute the indices along the vertical dimension.
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
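
As a quick illustration of what the snippet above computes, here is a minimal NumPy sketch of packing the four scalar dimensions into a rank-1 NHWC shape tensor and multiplying the spatial extents into an element count. The helper function and values are illustrative only; they mirror the variable names in lower_tf.cc but are not part of that pass.

    import numpy as np

    def packed_output_shape(batch, out_y, out_x, channels):
        # Analogue of the PackOp: stack the four scalar dims into a
        # rank-1 shape tensor [batch, out_y, out_x, channels] (NHWC).
        out_shape = np.stack([batch, out_y, out_x, channels])
        # Analogue of the MulOp: number of spatial output positions.
        out_count = out_y * out_x
        return out_shape, out_count

    # Example: a 1x8x8x3 input resized to a 16x16 spatial grid.
    shape, count = packed_output_shape(1, 16, 16, 3)
    print(shape)   # [ 1 16 16  3]
    print(count)   # 256
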
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_debugging.mlir

    // TF-DAG: %[[conv1_dequantized_0:.*]] = "tf.PartitionedCall"(%[[conv1_quantized]], %[[out_scale]], %[[in_out_zp]]) <{config = "", config_proto = "", executor_type = "", f = @dequantize_i8}>
    - Last Modified: Mon Nov 06 01:23:21 UTC 2023
    - 80.5K bytes
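
The TF-DAG check above only asserts that the quantized convolution result is passed to a dequantize call parameterized by a scale and a zero point. For reference, standard per-tensor affine dequantization looks like the sketch below; the Python function is illustrative and is not the actual @dequantize_i8 composite from the test.

    import numpy as np

    def dequantize_i8(q, scale, zero_point):
        # Affine per-tensor dequantization:
        # real_value = (quantized_value - zero_point) * scale
        return (q.astype(np.float32) - zero_point) * scale

    q = np.array([-128, 0, 42, 127], dtype=np.int8)
    print(dequantize_i8(q, scale=0.05, zero_point=-10))
    # [-5.9   0.5   2.6   6.85]
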
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir

    // quantize_conv_with_bias_dynamic_fn, omitting stablehlo.maximum.
    // This is because activation clipping which includes 0.0f can be simply
    // omitted from the graph as the lifted function's out_scale and out_zp are
    // already calculated based on the clipped distribution.
    // Note that the resulting scale and zero point should be calculated based on
    // clipped range [0, r_max].
    
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 91.6K bytes
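
The comment above argues that the explicit activation clamp (stablehlo.maximum with 0.0f) can be dropped because out_scale and out_zp are already derived from the clipped range [0, r_max]. Below is a minimal sketch of that derivation for signed int8, assuming the usual asymmetric (affine) scheme; the helper is illustrative and not part of the pass.

    def scale_and_zero_point(r_min, r_max, qmin=-128, qmax=127):
        # Affine quantization parameters mapping the clipped real range
        # [r_min, r_max] onto the integer range [qmin, qmax].
        scale = (r_max - r_min) / (qmax - qmin)
        zero_point = round(qmin - r_min / scale)
        return scale, zero_point

    # With r_min = 0.0 the real value 0.0 maps exactly to qmin, so anything
    # below 0.0 already saturates at qmin and a separate max(x, 0) in the
    # graph is redundant.
    scale, zp = scale_and_zero_point(0.0, 6.0)
    print(scale, zp)  # 0.023529411764705882 -128
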
  4. tensorflow/compiler/mlir/tfr/python/tfr_gen.py

            print('TODO: use "node_type"')
          if node.attr == 'shape' and tensor_type == TFRTypes.TENSOR:
            ssa_value = self._ssa_name('shape')
            self._emit_with_loc(
                '\n{} = tfr.get_shape {} -> !shape.shape'.format(ssa_value, value),
                node)
            return (ssa_value, TFRTypes.SHAPE)
    
        if isinstance(node.value, ast.Attribute):
          if isinstance(node.value.value, ast.Name):
    - Last Modified: Wed Apr 27 15:27:03 UTC 2022
    - 55.8K bytes
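
The tfr_gen.py excerpt above inspects an ast.Attribute node and, when the attribute is .shape on a tensor value, emits a tfr.get_shape op. The standard-library snippet below (nothing TFR-specific, shown only to illustrate the AST structure being matched) prints the pieces that branch looks at for an expression like x.shape.

    import ast

    # `x.shape` parses to an Attribute node: .attr is 'shape' and .value is
    # the Name node for `x` -- the kind of node the visitor is inspecting
    # when it emits `tfr.get_shape`.
    node = ast.parse('x.shape', mode='eval').body
    print(type(node).__name__)        # Attribute
    print(node.attr)                  # shape
    print(type(node.value).__name__)  # Name
    print(node.value.id)              # x
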