Results 1 - 6 of 6 for whole_model (0.38 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op_stablehlo.mlir

        return %6 : tensor<?x2xf32>
      }
    
    // WholeModel-LABEL: func @matmul2
    // WholeModel-DAG: %[[b0:.*]] = stablehlo.constant dense<[-0.211145893
    // WholeModel-DAG: %[[w0:.*]] = stablehlo.constant dense<{{\[\[}}-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 18K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc

      Option<DebuggerType> debugger_type_{
          *this, "debugger_type",
          llvm::cl::init(DebuggerConfig::DEBUGGER_TYPE_UNSPECIFIED),
          llvm::cl::values(
              clEnumValN(DebuggerConfig::DEBUGGER_TYPE_WHOLE_MODEL, "whole_model",
                         "Whole model verify"),
              clEnumValN(DebuggerConfig::DEBUGGER_TYPE_INT_PER_LAYER,
                         "int_per_layer", "Int Per-layer verify"),
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 13K bytes
  3. tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto

        DEBUGGER_TYPE_FLOAT_PER_LAYER = 3;
      }
    
      DebuggerType debugger_type = 1;
    
      // Path to save unquantized model with dump tensor ops attached.
      // Used when debugger_type is WHOLE_MODEL.
      string unquantized_dump_model_path = 2;
    
      // Path to save debugger related logs. Defaults to '/tmp/dumps'.
      string log_dir_path = 3;
    }
    
    // Defines various calibration options.
    // Next ID: 6
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 14.3K bytes
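    The DebuggerConfig fields shown in this proto can be filled in from Python before quantization. Below is a minimal sketch, assuming the standard protoc-generated module path and that DebuggerConfig is exposed as a top-level message there; the field and enum names come from the snippet above and from result 2.

      # Assumed import path for the generated proto module (follows protoc naming for
      # tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto).
      from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2

      debugger_config = quantization_config_pb2.DebuggerConfig(
          # Dump tensors from the whole (unquantized) model for later comparison.
          debugger_type=quantization_config_pb2.DebuggerConfig.DEBUGGER_TYPE_WHOLE_MODEL,
          # Only read when debugger_type is WHOLE_MODEL: where the unquantized
          # model with dump tensor ops attached is saved.
          unquantized_dump_model_path='/tmp/unquantized_dump_model',
          # Debugger logs; the proto documents '/tmp/dumps' as the default.
          log_dir_path='/tmp/dumps',
      )

    How this config is attached to the overall quantization call depends on the surrounding QuantizationConfig API and is not shown here.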
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir

        return %2 : tensor<?x2x2x2xf32>
      }
    
    // WholeModel-LABEL: func @multiple_conv2d
    // WholeModel-DAG: %[[b0:.*]] = "tf.Const"() <{value = dense<0.000000e+00>
    // WholeModel-DAG: %[[b1:.*]] = "tf.Const"() <{value = dense<1.000000e+00>
    // WholeModel-DAG: %[[w0:.*]] = "tf.Const"() <{value = dense<{{\[\[\[\[}}0.193340182, 0.285152316
    // WholeModel-DAG: %[[w1:.*]] = "tf.Const"() <{value = dense<{{\[\[\[\[}}-0.174680978, -0.367524445
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 37.9K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composit_functions_debugging.mlir

    // XLA_PerChannel-DAG: %[[out_scale:.*]] = "tf.Const"() <{value = dense<7.24974147E-4> : tensor<f32>}> : () -> tensor<f32>
    }
    
    // TODO(b/308773062): Add whole_model unit-test
    
    // -----
    
    module {
      func.func @matmul2_with_int_per_layer(%arg0: tensor<2x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> tensor<2x2xf32> {
    - Last Modified: Mon Nov 06 01:23:21 UTC 2023
    - 80.5K bytes
  6. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

              outputs={output_key: output_tensor},
          )
    
        return in_placeholder
    
      def _create_while_model(self, input_shape: Sequence[int] = (1, 32, 32, 512)):
        class WhileModel(module.Module):
          """A model with a while op."""
    
          def __init__(self):
            w_shape = [3, 3] + [input_shape[-1], input_shape[-1]]
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes