Results 1 - 10 of 12 for input_to_output_intermediate (0.34 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir

          input_to_cell_intermediate = tensor<!quant.calibrated<f32<-4.000000e+00:4.000000e+00>>>,
          input_to_forget_intermediate = tensor<!quant.calibrated<f32<-1.600000e+01:1.600000e+01>>>,
          input_to_output_intermediate = tensor<!quant.calibrated<f32<-1.000000e+00:1.000000e+00>>>,
          proj_clip = 0.000000e+00 : f32,
          time_major = false} : (
            tensor<1x5xf32>,
            none, tensor<2x5xf32>, tensor<2x5xf32>, tensor<2x5xf32>,
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 52.6K bytes
  2. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/lstm.mlir

    = tensor<0x!quant.uniform<i16:f32, 0.0049890000373125076>>,
    input_to_forget_intermediate = tensor<0x!quant.uniform<i16:f32, 0.0078849997371435165>>,
    input_to_cell_intermediate = tensor<0x!quant.uniform<i16:f32, 0.0087630003690719604>>,
    input_to_output_intermediate = tensor<0x!quant.uniform<i16:f32, 0.0057529998011887074>>,
    effective_hidden_scale_intermediate = tensor<0x!quant.uniform<i8:f32, 0.0075630000792443752:2>>,
    kernel_type = #tfl<lstm_kernel_type_attr FULL>, proj_clip = 0.01 : f32} : (ten...
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.4K bytes
  3. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/lstm.json

    // CHECK-SAME: input_to_forget_intermediate = tensor<*x!quant.calibrated<f32<-1.600000e+01:1.600000e+01>>>
    // CHECK-SAME: input_to_input_intermediate = tensor<*x!quant.calibrated<f32<-3.200000e+01:3.200000e+01>>>
    // CHECK-SAME: input_to_output_intermediate = tensor<*x!quant.calibrated<f32<-1.000000e+00:1.000000e+00>>>
    
    // Checks if calibrated type is exported back to quantized type.
    // RoundTrip: name: "effective_hidden_scale_intermediate",
    - Last Modified: Wed May 01 06:25:50 UTC 2024
    - 9.1K bytes
  4. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

          fused_activation_function = "TANH",
          input_to_cell_intermediate = tensor<0xf32>,
          input_to_forget_intermediate = tensor<0xf32>,
          input_to_input_intermediate = tensor<0xf32>,
          input_to_output_intermediate = tensor<0xf32>,
          proj_clip = 0.000000e+00 : f32,
          time_major = false} : (
            tensor<1x2x3xf32>,
            tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>, tensor<1x1xf32>,
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
  5. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/unidirectional_sequence_lstm.mlir

    // CHECK-NEXT:       name: "input_to_cell_intermediate",
    // CHECK-NEXT:       has_rank: true
    // CHECK-NEXT:     }, {
    // CHECK-NEXT:       shape: [ 0 ],
    // CHECK-NEXT:       name: "input_to_output_intermediate",
    // CHECK-NEXT:       has_rank: true
    // CHECK-NEXT:     }, {
    // CHECK-NEXT:       shape: [ 0 ],
    // CHECK-NEXT:       type: INT8,
    // CHECK-NEXT:       name: "effective_hidden_scale_intermediate",
    - Last Modified: Wed Dec 06 18:55:51 UTC 2023
    - 11.7K bytes
  6. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/lstm_quantized.mlir

    = tensor<0x!quant.uniform<i16:f32, 0.0049890000373125076>>,
    input_to_forget_intermediate = tensor<0x!quant.uniform<i16:f32, 0.0078849997371435165>>,
    input_to_cell_intermediate = tensor<0x!quant.uniform<i16:f32, 0.0087630003690719604>>,
    input_to_output_intermediate = tensor<0x!quant.uniform<i16:f32, 0.0057529998011887074>>,
    effective_hidden_scale_intermediate = tensor<0x!quant.uniform<i8<-127:127>:f32, 0.0075630000792443752:2>>,
    kernel_type = #tfl<lstm_kernel_type_attr FULL>, proj_clip = 0.01 : f32}...
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 15.4K bytes
  7. tensorflow/compiler/mlir/lite/utils/lstm_utils.cc

          /*input_to_input_intermediate=*/mlir::TypeAttr(),
          /*input_to_forget_intermediate=*/mlir::TypeAttr(),
          /*input_to_cell_intermediate=*/mlir::TypeAttr(),
          /*input_to_output_intermediate=*/mlir::TypeAttr(),
          /*effective_hidden_scale_intermediate=*/mlir::TypeAttr());
    
      // Cast the static shaped lstm result to FuncOp's signature -
      // Ranked but unknown 2nd dimension to support stacking these.
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 36.2K bytes
  8. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h

    constexpr const char* intermediate_attributes[] = {
        "input_to_input_intermediate", "input_to_forget_intermediate",
        "input_to_cell_intermediate", "input_to_output_intermediate",
        "effective_hidden_scale_intermediate"};
    
    // Calculates the minimum power of two that is not less than the value.
    double PowerOfTwoBound(double value);
    
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 28K bytes
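    A hedged sketch of what PowerOfTwoBound could look like, based only on its comment above ("the minimum power of two that is not less than the value"); the actual implementation in prepare_quantize_helper.h may differ:

        #include <cmath>

        // Smallest power of two that is >= value; assumes value > 0.
        // Illustrative sketch only, not the TensorFlow implementation.
        double PowerOfTwoBound(double value) {
          return std::pow(2.0, std::ceil(std::log2(value)));
        }

    For example, a range bound of 5.0 would be rounded up to 8.0, while 4.0 is already a power of two and stays at 4.0.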
  9. tensorflow/compiler/mlir/lite/tests/ops.mlir

    tensor<0x!quant.uniform<i8<-127:127>:f32, 0.0077881771139800549>>,
    fused_activation_function = "TANH",
    input_to_cell_intermediate = tensor<0xf32>,
    input_to_forget_intermediate = tensor<0xf32>,
    input_to_input_intermediate = tensor<0xf32>,
    input_to_output_intermediate = tensor<0xf32>,
    proj_clip = 0.000000e+00 : f32, time_major = false}> : (
      tensor<?x?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>,
      tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>,
      tensor<?x?xf32>, tensor<?xf32>,...
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
  10. tensorflow/compiler/mlir/lite/flatbuffer_import.cc

        // Create intermediate value
    
        const llvm::SmallVector<llvm::StringRef, 5> kIntermediateNames = {
            "input_to_input_intermediate", "input_to_forget_intermediate",
            "input_to_cell_intermediate", "input_to_output_intermediate",
            "effective_hidden_scale_intermediate"};
        for (auto type_and_name :
             llvm::zip(intermediate_types, kIntermediateNames)) {
          mlir::TypeAttr type_attr =
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 66.8K bytes
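    The excerpt above pairs each intermediate tensor type with its attribute name via llvm::zip. A standalone sketch of that pairing pattern, assuming LLVM headers are available; the placeholder type strings stand in for the importer's real mlir::TypeAttr values:

        #include <iostream>
        #include <string>
        #include <vector>
        #include "llvm/ADT/STLExtras.h"

        int main() {
          // Names taken from the excerpt above; the types are illustrative placeholders.
          std::vector<std::string> kIntermediateNames = {
              "input_to_input_intermediate", "input_to_forget_intermediate",
              "input_to_cell_intermediate", "input_to_output_intermediate",
              "effective_hidden_scale_intermediate"};
          std::vector<std::string> intermediate_types = {"i16", "i16", "i16", "i16", "i8"};
          // llvm::zip walks both ranges in lockstep, yielding tuple-like elements.
          for (auto type_and_name : llvm::zip(intermediate_types, kIntermediateNames)) {
            std::cout << std::get<1>(type_and_name) << " : "
                      << std::get<0>(type_and_name) << "\n";
          }
          return 0;
        }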