Results 111 - 120 of 839 for filecheck (0.23 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/preprocess_op_weight_only.mlir

    // RUN: tf-quant-opt %s -split-input-file -quant-preprocess-op='target-opset=XLA quantization-method=weight_only enable-per-channel-quantization=true' | FileCheck --check-prefix PerChannel %s
    
    module {
      // For XLA weight-only per-channel depthwise convolution, tensor shape should have
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 4.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/python/mlir_wrapper/basic_classes.cc

    See the License for the specific language governing permissions and
    limitations under the License.
    ==============================================================================*/
    
    #include "llvm/FileCheck/FileCheck.h"
    #include "mlir/IR/Block.h"  // from @llvm-project
    #include "mlir/IR/Location.h"  // from @llvm-project
    #include "mlir/IR/MLIRContext.h"  // from @llvm-project
    #include "mlir/IR/Operation.h"  // from @llvm-project
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jan 22 04:26:07 UTC 2022
    - 2.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/output-shapes.pbtxt

    # RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-unconditionally-use-set-output-shapes-on-import -tf-enable-shape-inference-on-import=true -tf-graph-as-function %s -o - | FileCheck %s
    
    # Verify importing with _output_shapes enabled works as expected.
    
    node {
      name: "_Arg"
      op: "_Arg"
      attr {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Sep 21 04:07:13 UTC 2021
    - 3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/debuginfo/saved_model_error.py

    # limitations under the License.
    # ==============================================================================
    """Test file to display the error message and verify it with FileCheck."""
    
    # RUN: %p/saved_model_error | FileCheck %s
    
    import sys
    
    from absl import app
    
    import tensorflow.compat.v2 as tf
    if hasattr(tf, 'enable_v2_behavior'):
      tf.enable_v2_behavior()
    
    
    class TestModule(tf.Module):
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 2.8K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/graph-malformed.pbtxt

    # RUN: not tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -o - 2>&1 | FileCheck %s
    
    this is not a valid graph def
    
    # CHECK: Error parsing Protobuf
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Aug 10 23:27:16 UTC 2021
    - 219 bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/feed-as-fetch.pbtxt

    # RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=input -tf-input-data-types=DT_INT32 -tf-input-shapes=8 -tf-output-arrays=input -o - | FileCheck %s
    
    node {
      name: "input"
      op: "Placeholder"
      attr {
        key: "dtype"
        value {
          type: DT_INT32
        }
      }
    }
    versions {
      producer: 27
    }
    
    # CHECK-LABEL: func @main
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 10 19:53:21 UTC 2020
    - 684 bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/trim-functions-tf.mlir

    // RUN: tf-opt -tfl-trim-funcs-tf="trim-funcs-allowlist=bar,foobar" %s | FileCheck %s
    
    func.func @foo(%arg0: tensor<1x4xf32>, %arg1: tensor<1x4xf32>) -> tensor<1x4xf32> {
      func.return %arg0 : tensor<1x4xf32>
    }
    
    func.func @bar(%arg0: tensor<2x4xf32>, %arg1: tensor<2x4xf32>) -> tensor<2x4xf32> {
      func.return %arg0 : tensor<2x4xf32>
    }
    
    func.func @foobar(%arg0: tensor<1x4xf32>, %arg1: tensor<1x4xf32>) -> tensor<1x4xf32> {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 26 23:53:32 UTC 2022
    - 565 bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tests/legalize-tf-no-runtime-verification.mlir

    // RUN: tf-opt %s -tfl-prepare-tf -tfl-legalize-tf='run-tfl-runtime-verification=false' -tfl-optimize | FileCheck %s
    
    func.func @broadcast_to_bf16(%arg0: tensor<3xbf16>, %arg1: tensor<2xi64>) -> tensor<3x3xbf16> {
      %0 = "tf.BroadcastTo"(%arg0, %arg1) : (tensor<3xbf16>, tensor<2xi64>) -> tensor<3x3xbf16>
      func.return %0: tensor<3x3xbf16>
    
    // CHECK-LABEL: broadcast_to_bf16
    // CHECK:  [[CST:%.*]] = arith.constant dense<1.000000e+00> : tensor<3x3xbf16>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 648 bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/tests/print.mlir

    // RUN: tf-opt %s --tf-print | FileCheck %s
    
    module {
    // Smoke test. We don't expect any modifications of the MLIR.
    
    // CHECK-LABEL: foo
    // CHECK: return
    func.func @foo(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> {
      return %arg0 : tensor<f32>
    }
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Oct 12 23:15:17 UTC 2023
    - 260 bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/multi_output_op.json

    // RUN: json_to_flatbuffer %p/test_schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir --mlir-print-debuginfo -o - | FileCheck %s
    
    // CHECK: #[[LOC0:.*]] = loc("<stdin>":0:0)
    // CHECK: "tfl.split"
    // CHECK-SAME: loc(#[[SPLIT_LOC:.*]])
    // CHECK: #[[LOC1:.*]] = loc("output0"(#[[LOC0]]))
    // CHECK: #[[LOC2:.*]] = loc("output1"(#[[LOC0]]))
    // CHECK: #[[SPLIT_LOC]] = loc(fused[#[[LOC1]], #[[LOC2]]])
    
    {
      "version": 3,
      "operator_codes": [
        {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Dec 03 00:08:31 UTC 2022
    - 1.7K bytes
    - Viewed (0)