Results 51 - 60 of 280 for arity (0.1 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/fold-broadcast.mlir

      %cst = arith.constant dense<[3, 5, 7]> : tensor<3xi32>
      %0 = "tf.BroadcastTo"(%arg1, %cst) : (tensor<5xf32>, tensor<3xi32>) -> tensor<3x5x7xf32>
      %1 = "tf.Mul"(%arg0, %0) : (tensor<5x7xf32>, tensor<3x5x7xf32>) -> tensor<3x5x7xf32>
      func.return %1 : tensor<3x5x7xf32>
      // CHECK: %[[C0:.*]] = arith.constant dense<[3, 5, 7]> : tensor<3xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 6.6K bytes - Viewed (0)
  2. tensorflow/compiler/mlir/lite/utils/constant_utils.h

    ==============================================================================*/
    
    #ifndef TENSORFLOW_COMPILER_MLIR_LITE_UTILS_CONSTANT_UTILS_H_
    #define TENSORFLOW_COMPILER_MLIR_LITE_UTILS_CONSTANT_UTILS_H_
    
    #include "mlir/Dialect/Arith/IR/Arith.h"  // from @llvm-project
    #include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project
    #include "mlir/IR/BuiltinTypes.h"  // from @llvm-project
    #include "mlir/IR/Location.h"  // from @llvm-project
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Feb 27 06:24:28 UTC 2024 - 1.7K bytes - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/optimize_batch_matmul.mlir

      %0 = arith.constant dense<[[1.0], [2.0]]> : tensor<2x1xf32>
      %1 = "tfl.batch_matmul"(%arg0, %0) {adj_x = false, adj_y = false, asymmetric_quantize_inputs = false} : (tensor<4x128x2xf32>, tensor<2x1xf32>) -> tensor<4x128x1xf32>
      func.return %1 : tensor<4x128x1xf32>
      // CHECK-NEXT: %[[CONST_WEIGHT:.*]] = arith.constant
      // CHECK-SAME: [1.000000e+00, 2.000000e+00]
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 9K bytes - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/optimize.mlir

    // CHECK-LABEL: convbiasaddmul
    func.func @convbiasaddmul(%arg: tensor<256x32x32x3xf32>) -> tensor<256x8x7x16xf32> {
      %filter = arith.constant dense<2.0> : tensor<3x3x3x16xf32>
      %bias = arith.constant dense<3.0> : tensor<16xf32>
      %value = arith.constant dense<4.0> : tensor<16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 3.3K bytes - Viewed (0)
  5. internal/config/storageclass/help.go

    	}
    
    	Help = config.HelpKVS{
    		config.HelpKV{
    			Key:         ClassStandard,
    			Description: `set the parity count for default standard storage class` + defaultHelpPostfix(ClassStandard),
    			Optional:    true,
    			Type:        "string",
    		},
    		config.HelpKV{
    			Key:         ClassRRS,
    			Description: `set the parity count for reduced redundancy storage class` + defaultHelpPostfix(ClassRRS),
    			Optional:    true,
    Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Tue Mar 26 22:06:19 UTC 2024 - 1.8K bytes - Viewed (0)
  6. tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx_test.cc

      %index = arith.constant 0 : index
      %cst = arith.constant 1 : i64
      %eq = arith.cmpi "eq", %n, %cst : i64
      %v1 = tfr.get_element %values[%index] : (!tfr.tensor_list, index) -> !tfr.tensor
      %res = scf.if %eq -> !tfr.tensor {
        scf.yield %v1 : !tfr.tensor
      } else {
        %step = arith.index_cast %cst : i64 to index
        %end = arith.index_cast %n : i64 to index
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 5.8K bytes - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/signature_def.mlir

        %cst = arith.constant dense<0.000000e+00> : tensor<5xf32>
        %cst_0 = arith.constant dense<1.0> : tensor<5x384xf32>
        %cst_1 = arith.constant dense<1.0> : tensor<5x384xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Dec 06 18:55:51 UTC 2023 - 4.9K bytes - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

      %cst = arith.constant dense<25.4> : tensor<2x3xf32>
      func.return %cst : tensor<2x3xf32>
    
    // CHECK-NEXT:  %[[cst:.*]] = arith.constant dense<2.540000e+01> : tensor<2x3xf32>
    // CHECK-NEXT:  "tfl.quantize"(%[[cst]]) <{qtype = tensor<2x3x!quant.uniform<u8:f32, 0.099607841641295186>>}> {volatile}
    }
    
    // CHECK-LABEL: QuantizePositiveScalar
    func.func @QuantizePositiveScalar() -> tensor<f32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 67.5K bytes - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils_test.cc

    ==============================================================================*/
    
    #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h"
    
    #include <gtest/gtest.h>
    #include "mlir/Dialect/Arith/IR/Arith.h"  // from @llvm-project
    #include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project
    #include "mlir/IR/Builders.h"  // from @llvm-project
    #include "mlir/IR/MLIRContext.h"  // from @llvm-project
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 25 00:32:20 UTC 2024 - 2.1K bytes - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc

    // This transformation pass applies quantization propagation on the TF dialect.
    
    #include <algorithm>
    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>
    
    #include "mlir/Dialect/Arith/IR/Arith.h"  // from @llvm-project
    #include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project
    #include "mlir/Dialect/Quant/QuantOps.h"  // from @llvm-project
    #include "mlir/Dialect/Quant/QuantTypes.h"  // from @llvm-project
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0)