Results 61 - 70 of 337 for TensorT (0.12 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/tensor_array_ops_decomposition.cc

    #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc"
    
    // A pass that converts tensor array operations to tensor operations and
    // read/assign ops on local variables. A later resource lifting pass can further
    // remove the local variables.
    //
    // This pass requires that the full shape of the tensor array can be inferred:
    // 1) the size needs to be a constant, 2) it specifies the full element shape,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 02 20:41:19 UTC 2023
    - 40.2K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc

      // criteria for determining quantizable ops differs by the inference type.
      bool getQuantizableOps(arith::ConstantOp op,
                             QuantizationUnits& quantizable_ops) const {
        // Non-float tensors do not need quantization.
        auto type = mlir::dyn_cast<ShapedType>(op.getType());
        if (!type || !type.getElementType().isF32()) return false;
    
        Value value = op.getResult();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.5K bytes
    - Viewed (0)
  3. tensorflow/c/eager/c_api.cc

            "TFE_TensorHandleDeviceMemorySize may not be called on a ",
            handle->TypeString(), " tensor handle.");
        return 0;
      }
      const tensorflow::Tensor* tensor;
      status->status = handle->Tensor(&tensor);
      if (!status->status.ok()) {
        return 0;
      }
      return tensor->TotalBytes();
    }
    
    TFE_Op* TFE_NewOp(TFE_Context* ctx, const char* op_or_function_name,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 08:11:23 UTC 2024
    - 44K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/kernels/xla_ops.cc

        // the beginning are stripped off and the closure key is appended as the
        // last input. So the inputs look like: input tensors, resource variables,
        // closure key tensor.
        std::vector<const Tensor*> inputs = InputsFromContext(ctx);
        absl::flat_hash_map<int, const Tensor*> variable_snapshots;
        for (const auto& [variable_index, variable_tensor] :
             closure.resource_var_snapshots()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tf2xla/api/v2/testdata/func_with_dead_ops.mlir

              tf_device.return %20#0, %20#1, %20#2, %20#3, %20#4, %20#5, %20#6, %20#7, %20#8, %20#9 : tensor<i32>, tensor<32x1024xf32>, tensor<i64>, tensor<i64>, tensor<1024xf32>, tensor<32x1024xf32>, tensor<1024x1xf32>, tensor<1024xf32>, tensor<1024x1xf32>, tensor<i64>...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 13 23:22:50 UTC 2024
    - 15.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tfrt/tests/mlrt/async_while.mlir

      %0 = "tf.AddV2"(%loop_count, %cst_1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
      %1 = "tf.TensorArrayReadV3"(%handle, %loop_count, %flow_in) : (tensor<?x!tf_type.resource>, tensor<i32>, tensor<*xf32>) -> tensor<3x3xf32>
      %2 = "tf.MatMul"(%1, %matrix)  : (tensor<3x3xf32>, tensor<3x3xf32>) -> tensor<3x3xf32>
      return %0, %max_iterations, %handle, %flow_in, %2: tensor<i32>, tensor<i32>, tensor<?x!tf_type.resource>, tensor<*xf32>, tensor<3x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 22.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/shape-inference.mlir

      func.return %0 : tensor<?...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 11.5K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/optimize_tf_control_flow_side_effect.mlir

      %0 = "tf.ReadVariableOp"(%handle) : (tensor<!tf_type.resource<tensor<i32>>>) -> tensor<i32>
      %1 = "tf.AddV2"(%arg, %0) : (tensor<i32>, tensor<i32>) -> tensor<i32>
      func.return %1, %handle : tensor<i32>, tensor<!tf_type.resource<tensor<i32>>>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 02 01:15:55 UTC 2023
    - 10.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_default.mlir

      %0 = "tf.Conv2DBackpropInput"(%arg0, %arg1, %arg2) {strides = [1, 2, 2, 1], padding="SAME", dilations=[1, 1, 1, 1]}: (tensor<4xi32>, tensor<3x3x1x32xf32>, tensor<15x14x14x32xf32>) -> tensor<15x28x28x1xf32>
      %1 = "tf.Const"() {value = dense<1.000000e+00> : tensor<1xf32>} : () -> tensor<1xf32>
      %2 = "tf.Sub"(%0, %1): (tensor<15x28x28x1xf32>, tensor<1xf32>) -> tensor<15x28x28x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

        %8 = stablehlo.convert %7 : (tensor<1x4x2xi8>) -> tensor<1x4x2xf32>
        %9 = stablehlo.convert %2 : (tensor<2x3xi8>) -> tensor<2x3xf32>
        %10 = stablehlo.dot_general %8, %9, contracting_dims = [2] x [0] : (tensor<1x4x2xf32>, tensor<2x3xf32>) -> tensor<1x4x3xf32>
        %11 = stablehlo.convert %3 : (tensor<1x1x3xi32>) -> tensor<1x1x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
    - Viewed (0)
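The pass comment in result 1 states two conditions under which the tensor array's full shape can be inferred: the array's size must be a constant, and the element shape must be fully specified. Below is a minimal Python sketch of a tf.TensorArray usage that satisfies both conditions, assuming the standard TensorFlow 2.x API; the function and variable names are illustrative and do not come from the files above.

    import tensorflow as tf

    @tf.function
    def accumulate(xs):
        # Constant size (3) and a fully specified element_shape ([2]) -- the two
        # prerequisites named in the decomposition pass comment above.
        ta = tf.TensorArray(tf.float32, size=3, element_shape=tf.TensorShape([2]))
        for i in tf.range(3):
            ta = ta.write(i, xs[i])
        return ta.stack()  # shape [3, 2]

    print(accumulate(tf.ones([3, 2])))

With a constant size of 3 and element shape [2], the array's full [3, 2] shape is known statically, which is what allows the pass to replace the tensor array with tensor operations and read/assign ops on local variables, as the comment describes.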