Results 91 - 100 of 1,398 for TensorT (0.29 sec)

  1. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/mul_v2.mlir

      // CHECK-NEXT:}
    
      %0 = "tfl.pseudo_qconst"() { qtype = tensor<3x!quant.uniform<i8:f32, 0.1>>, value = dense<2> : tensor<3xi8>} : () -> tensor<3x!quant.uniform<i8:f32, 0.1>>
      %1 = "tfl.mul"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<3x!quant.uniform<i8:f32, 0.1>>, tensor<3x!quant.uniform<i8:f32, 0.1>>) -> tensor<3x!quant.uniform<i8:f32, 0.1>> loc("mul")
      func.return %1 : tensor<3x!quant.uniform<i8:f32, 0.1>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 2.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/mul_v3.mlir

      // CHECK-NEXT:}
    
      %0 = "tfl.pseudo_qconst"() { qtype = tensor<3x!quant.uniform<i8:f32, 1.0>>, value = dense<2> : tensor<3xi8>} : () -> tensor<3x!quant.uniform<i8:f32, 1.0>>
      %1 = "tfl.mul"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<3x!quant.uniform<i8:f32, 1.0>>, tensor<3x!quant.uniform<i8:f32, 1.0>>) -> tensor<3x!quant.uniform<i8:f32, 1.0>> loc("mul")
      func.return %1 : tensor<3x!quant.uniform<i8:f32, 1.0>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 2.9K bytes
    - Viewed (0)
  3. tensorflow/c/experimental/ops/array_ops.cc

      int num_retvals = 1;
      return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
    }
    
    // Op: IdentityN()
    // Summary: Returns a list of tensors with the same shapes and contents as the
    // input
    //
    // Description:
    //   tensors.
    //
    //   This op can be used to override the gradient for complicated functions. For
    //   example, suppose y = f(x) and we wish to apply a custom function g for
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 10 19:11:36 UTC 2022
    - 6.7K bytes
    - Viewed (0)
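
    The IdentityN doc quoted in this result describes overriding the gradient of y = f(x) with a custom function g. A minimal sketch of the same idea using tf.custom_gradient (a related, higher-level API; tf.square and the constant factor below are stand-ins for f and g, not taken from the file):

      import tensorflow as tf

      # Replace the backward pass of y = f(x) with a hypothetical g(dy).
      @tf.custom_gradient
      def f_with_custom_grad(x):
        y = tf.square(x)           # stand-in for y = f(x)
        def grad(dy):
          return 3.0 * dy          # stand-in for dx = g(dy)
        return y, grad

      x = tf.constant(2.0)
      with tf.GradientTape() as tape:
        tape.watch(x)
        y = f_with_custom_grad(x)
      print(tape.gradient(y, x))   # 3.0 from g, not 2*x from tf.square
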
  4. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/transpose_conv_optional.mlir

      %0 = "tfl.transpose_conv"(%arg0, %arg1, %arg2, %cst) {padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32, fused_activation_function = "NONE"} : (tensor<4xi32>, tensor<32x4x4x128xf32>, tensor<1x32x42x128xf32>, none) -> tensor<1x64x84x32xf32>
      func.return %0 : tensor<1x64x84x32xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Dec 14 04:58:17 UTC 2022
    - 2.8K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/python/representative_dataset_test.py

    
    def _contains_tensor(sample: repr_dataset.RepresentativeSample) -> bool:
      """Determines whether `sample` contains any tf.Tensors.
    
      Args:
        sample: A `RepresentativeSample`.
    
      Returns:
        True iff `sample` contains at least one tf.Tensor.
      """
      return any(map(lambda value: isinstance(value, core.Tensor), sample.values()))
    
    
    class RepresentativeDatasetTest(test.TestCase):
      """Tests functions for representative datasets."""
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jan 04 07:35:19 UTC 2024
    - 11.6K bytes
    - Viewed (0)
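
    The _contains_tensor helper in this result checks whether any value in a representative sample is a tf.Tensor. A standalone sketch of the same check, assuming a sample is a mapping from input names to values (plain TensorFlow stands in for the repr_dataset and core imports used by the test):

      import tensorflow as tf

      def contains_tensor(sample) -> bool:
        # True iff at least one value in the sample is a tf.Tensor.
        return any(isinstance(value, tf.Tensor) for value in sample.values())

      print(contains_tensor({'input': tf.constant([1.0, 2.0])}))  # True
      print(contains_tensor({'input': [1.0, 2.0]}))               # False
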
  6. tensorflow/compiler/jit/xla_launch_util.h

    class XlaComputationLaunchContext {
     public:
      // Create a new launch context. 'allocate_xla_tensors' is true if allocated
      // output tensors and variables are always XlaTensors. If false they are
      // assumed to be "normal" device pointers.
      // If 'use_multiple_streams' is true, tensors may be defined and used on
      // multiple streams and so se::Events must be defined and waited for. If
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 11.8K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/custom_op_with_tflite_op.mlir

      %0 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
      %1 = "tfl.mul"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
      // tf.MyCustomOp is the result of conversion to a Custom op
      %2 = "tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32}  : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("MyCustomOp")
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 4.1K bytes
    - Viewed (0)
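
    The comment in this result notes that tf.MyCustomOp is what conversion produces for an op with no TFLite builtin. A minimal converter sketch, assuming a SavedModel at a placeholder path and that unsupported ops should be kept as Custom ops:

      import tensorflow as tf

      # '/tmp/my_saved_model' is a placeholder path.
      converter = tf.lite.TFLiteConverter.from_saved_model('/tmp/my_saved_model')
      converter.allow_custom_ops = True  # unsupported ops are emitted as Custom ops
      tflite_model = converter.convert()
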
  8. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/svdf.mlir

    // CHECK-EMPTY:
    
    ^bb0(%arg0: tensor<4 x f32>, %arg1: tensor<4 x f32>, %arg2: tensor<4 x f32>, %arg3: tensor<4 x f32>):
      %0 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
      %1 = "tfl.svdf"(%arg0, %arg1, %arg2, %arg3, %0) {fused_activation_function = "RELU", rank = 2 : i32} : (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
      func.return %1 : tensor<4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 3.6K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/unidirectional_sequence_rnn.mlir

    // CHECK-EMPTY:
    
    ^bb0(%arg0: tensor<4 x f32>, %arg1: tensor<4 x f32>, %arg2: tensor<4 x f32>, %arg3: tensor<4 x f32>):
      %0 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4x4xf32>} : () -> tensor<4x4xf32> loc("Const")
      %1 = "tfl.unidirectional_sequence_rnn"(%arg0, %arg1, %arg2, %arg3, %0) {fused_activation_function = "TANH", time_major = true} : (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4x4xf32>) -> tensor<4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 3.9K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

    SparseTensor is not supported. The return value of the decorated function
    must be a Tensor or a list/tuple of Tensors.
      }];
    
      let arguments = (ins
        Arg<Variadic<TF_Tensor>, [{The tensors to be batched.}]>:$in_tensors,
        Arg<Variadic<TF_Tensor>, [{The tensors which are captured in the function, and don't need
    to be batched.}]>:$captured_tensors,
    
        SymbolRefAttr:$f,
        I64Attr:$num_batch_threads,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes
    - Viewed (0)
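
    The tf_generated_ops.td snippet in this result quotes the BatchFunction op's constraint: the decorated function must return a Tensor or a list/tuple of Tensors, and SparseTensor is not supported. A hedged sketch of the Python entry point, tf.nondifferentiable_batch_function, with illustrative parameter values:

      import tensorflow as tf

      @tf.nondifferentiable_batch_function(
          num_batch_threads=1,
          max_batch_size=8,
          batch_timeout_micros=1000)
      def batched_square(x):
        return tf.square(x)  # a single Tensor, as the constraint requires

      # Each call contributes one row; calls are batched up to max_batch_size or
      # until batch_timeout_micros elapses before the function runs.
      y = batched_square(tf.constant([[1.0, 2.0]]))
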