Results 41 - 50 of 657 for const2 (0.89 sec)

  1. internal/ringbuffer/ring_buffer_benchmark_test.go

    	go func() {
    		for {
    			rb.Read(buf)
    		}
    	}()
    
    	b.ResetTimer()
    	for i := 0; i < b.N; i++ {
    		rb.Write(data)
    	}
    }
    
    func BenchmarkRingBuffer_AsyncReadBlocking(b *testing.B) {
    	const sz = 512
    	const buffers = 10
    	rb := New(sz * buffers)
    	rb.SetBlocking(true)
    	data := []byte(strings.Repeat("a", sz))
    	buf := make([]byte, sz)
    
    	go func() {
    		for {
    			rb.Read(buf)
    		}
    	}()
    
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Wed May 15 00:11:04 UTC 2024
    - 1.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op.cc

              ids_[idx], static_cast<CalibrationMethod>(calibration_methods_[idx]));
    
          const Tensor& min_tensor = context->input(3 * idx);
          const Tensor& max_tensor = context->input(3 * idx + 1);
          const Tensor& histogram_tensor = context->input(3 * idx + 2);
    
          const float min_value = min_tensor.scalar<float>()();
          const float max_value = max_tensor.scalar<float>()();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 13 01:31:23 UTC 2024
    - 8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/python/converter_python_api.cc

      const tflite::TensorType inference_tensor_type =
          FromTocoDataTypeToTflitToTensorType(inference_type);
      const tflite::TensorType input_type =
          FromTocoDataTypeToTflitToTensorType(input_data_type);
      const tflite::TensorType output_type =
          FromTocoDataTypeToTflitToTensorType(output_data_type);
    
      std::string output_model;
      const absl::string_view input_model_buffer(buf, length);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 19.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc

      return new_config;
    }
    
    bool HasQuantizationMethod(const QuantizationSpecs& specs,
                               Method::MethodCase method_case) {
      for (const auto& spec : specs.specs()) {
        if (spec.method().method_case() == method_case) {
          return true;
        }
      }
      return false;
    }
    
    QuantizationConfig PopulateDefaults(
        const QuantizationConfig& user_provided_config) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 8.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/jit/shape_inference.h

    // attribute of `Placeholder` op.
    Status InferShapes(Graph* graph, const std::map<int, InferredShape>& arg_shapes,
                       const tensorflow::FunctionLibraryDefinition* fnlib_def,
                       GraphShapeInfo* shape_info);
    
    // Merges two InferredShapes. Return an error if the two shapes cannot be
    // merged.
    absl::StatusOr<InferredShape> MergeInferredShapes(const InferredShape& a,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 31 00:41:19 UTC 2024
    - 2.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow_to_stablehlo/tf_to_stablehlo.cc

    // from a saved model or from an mlir file.
    absl::StatusOr<quant::stablehlo::ImportedMlirModuleOp> ImportSavedModelOrTfMlir(
        absl::string_view input_path, MLIRContext* context,
        const std::vector<std::string>& exported_model_signatures,
        const std::vector<std::string>& tag_names, bool is_input_mlir_module) {
      if (is_input_mlir_module) {
        std::string error_message;
        std::unique_ptr<llvm::MemoryBuffer> file =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 22:58:42 UTC 2024
    - 5.5K bytes
    - Viewed (0)
  7. tensorflow/c/experimental/ops/gen/common/path_config.cc

    #include "tensorflow/core/lib/strings/str_util.h"
    #include "tensorflow/core/platform/types.h"
    
    namespace tensorflow {
    namespace generator {
    
    PathConfig::PathConfig(const string& output_dir, const string& source_dir,
                           const string& api_dir_list,
                           const std::vector<string> op_names)
        : output_path(output_dir), op_names(op_names) {
      api_dirs = str_util::Split(api_dir_list, ",", str_util::SkipEmpty());
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 09:51:28 UTC 2024
    - 2.6K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc

    template <typename R>
    void UpdateTensorListResultTypesForIf(
        const llvm::SmallSet<int, 4> &tensor_list_index,
        const llvm::SmallSet<int, 4> &resized_tensor_list_index,
        const llvm::DenseMap<int, int> &tensor_list_map, ArrayRef<Type> types,
        R &&range, ValueRange operands,
        llvm::SmallVectorImpl<Type> *updated_types) {
      int i = 0;
      for (const auto it : llvm::zip(types, range)) {
        if (!tensor_list_index.count(i)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 20:00:43 UTC 2024
    - 70.7K bytes
    - Viewed (0)
  9. src/go/printer/printer_test.go

    	"go/parser"
    	"go/token"
    	"internal/diff"
    	"io"
    	"os"
    	"path/filepath"
    	"testing"
    	"time"
    )
    
    const (
    	dataDir  = "testdata"
    	tabwidth = 8
    )
    
    var update = flag.Bool("update", false, "update golden files")
    
    var fset = token.NewFileSet()
    
    type checkMode uint
    
    const (
    	export checkMode = 1 << iota
    	rawFormat
    	normNumber
    	idempotent
    	allowTypeParams
    )
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Jun 03 14:56:25 UTC 2024
    - 20.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/control_flow.mlir

    func.func @while_test() -> (tensor<i32>) {
      // CHECK: [[CONST:%.*]] = tfrt_fallback_async.const_dense_tensor dense<0> : tensor<i32>
      %0 = "tf.Const"() {device = "/device:CPU:0", value = dense<0> : tensor<i32>} : () -> tensor<i32>
      // CHECK: [[pred_res:%.*]]:2 = tfrt.call @"while_cond_lt9/tfrt_predicate"([[ARG0]], [[CONST]]) : (!tfrt.chain, !tfrt_fallback.tf_tensor) -> (!tfrt.chain, i1)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 00:40:32 UTC 2024
    - 17.5K bytes
    - Viewed (0)