Results 21 - 30 of 58 for fully_connected (0.26 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir

    // CHECK: return %[[FULLY_CONNECTED]]
    
    // -----
    
    // Tests that when the weight tensor for `stablehlo.dot_general` has a
    // `stablehlo.constant` -> `stablehlo.transpose` pattern, the
    // `stablehlo.constant` is directly transformed to `tfl.pseudo_qconst`, which
    // becomes the rhs of `tfl.fully_connected`. This is because
    // `tfl.fully_connected` accepts a [o, i] format for rhs, which
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 106.2K bytes
    - Viewed (0)
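    The comment above points out that `tfl.fully_connected` takes its rhs (the weight
    tensor) in [o, i] order, with the output units on the leading dimension. Below is a
    minimal MLIR sketch of that layout, not taken from the file above; the function name
    and shapes (4 output units, 3 input features) are made up for illustration:

      // Hypothetical example: rhs is tensor<4x3xf32>, i.e. [o, i] = 4 outputs x 3 inputs.
      func.func @fully_connected_oi_layout(%input: tensor<1x3xf32>) -> tensor<1x4xf32> {
        %weights = arith.constant dense<1.0> : tensor<4x3xf32>  // weights in [o, i] layout
        %bias = arith.constant dense<0.0> : tensor<4xf32>
        %0 = "tfl.fully_connected"(%input, %weights, %bias) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<1x3xf32>, tensor<4x3xf32>, tensor<4xf32>) -> tensor<1x4xf32>
        func.return %0 : tensor<1x4xf32>
      }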
  2. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc

      OptimizeBatchMatmulPass() = default;
      OptimizeBatchMatmulPass(const OptimizeBatchMatmulPass &) {}
    
      void runOnOperation() override;
    };
    
    // Converts batch_matmul operation to fully_connected if rhs is a
    // constant tensor with rank 2
    struct ConvertBatchMatMulOp2FullyConnectedOp
        : public OpRewritePattern<TFL::BatchMatMulOp> {
      using OpRewritePattern<TFL::BatchMatMulOp>::OpRewritePattern;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.6K bytes
    - Viewed (0)
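    As a hedged illustration of the input pattern this rewrite looks for (a
    `tfl.batch_matmul` whose rhs is a rank-2 constant), here is a small MLIR sketch;
    the names and shapes are assumptions, not taken from optimize_batch_matmul.cc:

      // Hypothetical candidate for the rewrite: the rhs is a rank-2 constant, so the
      // pass may turn this batch_matmul into a tfl.fully_connected (with the weights
      // transposed into the [o, i] layout that fully_connected expects).
      func.func @batch_matmul_const_rhs(%lhs: tensor<8x16xf32>) -> tensor<8x4xf32> {
        %rhs = arith.constant dense<1.0> : tensor<16x4xf32>  // rank-2 constant rhs
        %0 = "tfl.batch_matmul"(%lhs, %rhs) {adj_x = false, adj_y = false} : (tensor<8x16xf32>, tensor<16x4xf32>) -> tensor<8x4xf32>
        func.return %0 : tensor<8x4xf32>
      }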
  3. tensorflow/compiler/mlir/lite/tests/post-quantize.mlir

      func.return %2 : tensor<384x128x!quant.uniform<i8:f32, 2.0>>
    
    // CHECK-NEXT: %[[fc:.*]] = "tfl.fully_connected"{{.*}} -> tensor<384x128x!quant.uniform<i8:f32, 2.000000e+00>>
    // CHECK-NEXT: return %[[fc]]
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/experimental/tac/tests/fold-constants-to-subgraph.mlir

      %0 = "tfl.fully_connected"(%arg0, %arg1, %arg2) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<384x512x!quant.uniform<i8:f32, 0.1>>, tensor<128x512x!quant.uniform<i8<-127:127>:f32, 0.01>>, tensor<128x!quant.uniform<i32:f32,...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 10.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

      %2 = "tfl.pseudo_const"() {value = dense<[[0.1, 0.1, 0.1]]> : tensor<1x3xf32>} : () -> tensor<1x3xf32>
      %3 = "tfl.pseudo_const"() {value = dense<[0.1]> : tensor<1xf32>} : () -> tensor<1xf32>
      %4 = "tfl.fully_connected"(%1, %2, %3) {asymmetric_quantize_inputs = false, fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<1x3xf32>, tensor<1x3xf32>, tensor<1xf32>) -> tensor<1x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/tests/const-fold.mlir

      %cst_weights = arith.constant dense<[[5.0, 7.0], [11.0, 13.0], [17.0, 19.0]]> : tensor<3x2xf32>
      %cst_bias = arith.constant dense<[23.0, 29.0, 31.0]> : tensor<3xf32>
    
      %0 = "tfl.fully_connected" (%cst_input, %cst_weights, %cst_bias) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<2xf32>, tensor<3x2xf32>, tensor<3xf32>) -> tensor<3xf32>
      func.return %0 : tensor<3xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 45.8K bytes
    - Viewed (0)
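    The excerpt above is a constant-folding test: all three operands are constants, so
    the whole `tfl.fully_connected` can be folded into a constant result. A hypothetical
    worked example (the `%cst_input` value is cut off in the excerpt; assume
    dense<[1.0, 2.0]>), using out[o] = sum_i input[i] * weights[o][i] + bias[o]:

      // Assumed input: dense<[1.0, 2.0]> : tensor<2xf32> (not shown in the excerpt).
      //   out = [5*1 + 7*2 + 23, 11*1 + 13*2 + 29, 17*1 + 19*2 + 31]
      //       = [42.0, 66.0, 86.0]
      %folded = arith.constant dense<[42.0, 66.0, 86.0]> : tensor<3xf32>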
  7. tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir

      // CHECK-DAG: %[[VAL3:.+]] = "tfl.dequantize"(%[[VAL1]]) : (tensor<36x!quant.uniform<i32:f32, 1.000000e+00>>)
      // CHECK: %[[VAL4:.+]] = "tfl.fully_connected"(%arg0, %[[VAL2]], %[[VAL3]]) <{fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"}>
      // CHECK: return %[[VAL4]]
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

      %0 = "tfl.pseudo_qconst"() {qtype = tensor<128x!quant.uniform<i32:f32, 0.7>>, value = dense<0> : tensor<128xi32>} : () -> tensor<128x!quant.uniform<i32:f32, 0.7>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/common/tfl_pass_config.h

      // ops of the same device, under a `tf_device.launch` op.
      bool form_clusters = false;
      // If `unfold_batch_matmul` is true, the tf.BatchMatMul is unfolded to a set
      // of tfl.fully_connected ops.
      bool unfold_batch_matmul = true;
      // Whether to outline WhileOp at the end of the pipeline.
      bool outline_tf_while = false;
      // Whether to do shape inference.
      bool shape_inference = true;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:05:30 UTC 2024
    - 6.5K bytes
    - Viewed (0)
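    A small MLIR sketch of the kind of op the `unfold_batch_matmul` flag targets; the
    function name and shapes are hypothetical, and with the flag left at its default of
    true the converter may lower an op like this into `tfl.fully_connected` ops:

      // Hypothetical TF-dialect input. With unfold_batch_matmul = true, the batched
      // matmul is unfolded so the per-batch multiplications can be expressed as
      // tfl.fully_connected ops instead of a single batched matmul.
      func.func @batch_matmul_to_unfold(%a: tensor<2x4x5xf32>, %b: tensor<2x5x3xf32>) -> tensor<2x4x3xf32> {
        %0 = "tf.BatchMatMulV2"(%a, %b) {adj_x = false, adj_y = false} : (tensor<2x4x5xf32>, tensor<2x5x3xf32>) -> tensor<2x4x3xf32>
        func.return %0 : tensor<2x4x3xf32>
      }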
  10. tensorflow/compiler/mlir/lite/experimental/tac/hardwares/cpu_hardware.cc

      TargetHardwareOpRegistration<CpuHardware, Op> Op##_CpuHardware_hardware( \
          Create);
    
    // Operation costs on CPU
    
    // Currently used for these ops:
    // tfl.conv_2d / tfl.depthwise_conv_2d / tfl.fully_connected
    class CpuConvOp : public TargetHardwareOperation {
      double GetOpCost(mlir::Operation* op) const override {
        float cost = 0.0;
        int64_t arithmetic_count;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 5.9K bytes
    - Viewed (0)