Results 61 - 70 of 191 for output_types (0.22 sec)

  1. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

    def TF_AnonymousIteratorOp : TF_Op<"AnonymousIterator", [TF_UniqueResourceAllocation]> {
      let summary = "A container for an iterator resource.";
    
      let arguments = (ins
        ConfinedAttr<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
        ConfinedAttr<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
      );
    
      let results = (outs
        Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator" or
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/tests/region-control-flow-to-functional.mlir

        "tf.Yield"(%1) : (tensor<5xf32>) -> ()
      }) {device = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0", metadata = "", operandSegmentSizes = array<i32: 1, 2, 1>, output_shapes = [#tf_type.shape<>], output_types = [!tf_type.string]} : (tensor<4xf32>, tensor<3xf32>, tensor<!tf_type.resource>, tensor<2xf32>) -> tensor<!tf_type.variant>
      return
    }
    
    // -----
    
    func.func @init(%arg0: tensor<4xf32>) -> tensor<7xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Feb 02 11:15:34 UTC 2024
    - 44.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td

          %1 = "tf.ReduceDataset"(%arg0, %arg1) {
            Targuments = [],
            Tstate = [i64], device = "",
            f = @__reduce_func_1, f._tf_data_function = true,
            output_shapes = [#tf_type.shape<>],
            output_types = [i64], use_inter_op_parallelism = true, _xla_compile_device_type="TPU"} :
              (tensor<!tf_type.variant>, tensor<i64>) -> (tensor<i64>)
          func.return
        }
        ```
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 99.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/g3doc/_includes/tf_passes.md

      ) {
        %1 = "tf.ReduceDataset"(%arg0, %arg1) {
          Targuments = [],
          Tstate = [i64], device = "",
          f = @__reduce_func_1, f._tf_data_function = true,
          output_shapes = [#tf_type.shape<>],
          output_types = [i64], use_inter_op_parallelism = true, _xla_compile_device_type="TPU"} :
            (tensor<!tf_type.variant>, tensor<i64>) -> (tensor<i64>)
        func.return
      }
      ```

    with the following reduction function:
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Aug 02 02:26:39 UTC 2023
    - 96.4K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td

        Variadic<TF_Tensor>:$init_func_other_args,
        Variadic<TF_Tensor>:$next_func_other_args,
        Variadic<TF_Tensor>:$finalize_func_other_args,
    
        ConfinedAttr<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
        ConfinedAttr<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
        DefaultValuedOptionalAttr<StrAttr, "\"\"">:$metadata
      );
    
      let results = (outs
        TF_VariantTensor:$handle
      );
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 04:08:35 UTC 2024
    - 90.5K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/translate/import_model.cc

            "Placeholder node");
      }
    
      DataType dtype = it->second.imported_dtype;
      // Uses the existing output type if it isn't specified by the user.
      if (dtype == DT_INVALID) {
        dtype = node->attr().at("output_types").list().type(0);
      }
      // Update op name, drop inputs and set attributes required by the Placeholder
      // op.
      *node->mutable_op() = "Placeholder";
      node->clear_attr();
      node->clear_input();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 183.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/utils/nms_utils.cc

      Value iou_threshold = func_.getArgument(3);
      Value score_threshold = func_.getArgument(4);
      auto output_type0 = func_.getFunctionType().getResult(0);
      auto output_type1 = func_.getFunctionType().getResult(1);
    
      OpBuilder builder(func_.getBody());
      auto op = builder.create<mlir::TFL::NonMaxSuppressionV4Op>(
          func_.getLoc(), output_type0, output_type1, boxes, scores,
          max_output_size, iou_threshold, score_threshold);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc

      }
      bool changed = false;
      int next_op_result = 0;
      for (auto output_type : main_output_types) {
        if (tensorflow::IsTokenType(output_type)) continue;
        auto output_type_ranked = mlir::dyn_cast<RankedTensorType>(output_type);
        if (output_type_ranked == nullptr) {
          llvm::errs() << "Unsupported XlaCallModule result type: " << output_type
                       << "\n";
          return false;
        }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 08 07:28:49 UTC 2024
    - 134.1K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/utils/arithmetic_count_util.h

        auto output = op->getResult(0);
        auto output_type =
            mlir::dyn_cast_or_null<mlir::RankedTensorType>(output.getType());
        if (output_type == nullptr || !output_type.hasStaticShape()) return false;
    
        int64_t cols = 1;
        for (int i = 0; i < output_type.getRank() - 1; ++i) {
          cols *= output_type.getDimSize(i);
        }
        const int64_t cost_per_col = 2 * weight_type.getNumElements();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 3.1K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc

      auto output = op->getResult(0);
      auto output_type = mlir::dyn_cast_or_null<RankedTensorType>(output.getType());
      if (!output_type) return failure();
    
      // bias should be a vector sized of the last output dim.
      int64_t num_units = output_type.getDimSize(output_type.getRank() - 1);
      auto bias_type =
          mlir::RankedTensorType::get({num_units}, output_type.getElementType());
    
      mlir::DenseElementsAttr bias_attr;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 25.4K bytes
    - Viewed (0)
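
Taken together, the results above surface the query term in two forms: `output_types` as a required array attribute on tf.data-style ops in the TF dialect (results 1-5), and `output_type` as a C++ helper variable holding a result's `RankedTensorType` in lowering and analysis code (results 7-10). For the attribute case, the sketch below shows how the `output_types` / `output_shapes` pair typically appears at a use site in MLIR generic syntax; the values and function name are hypothetical, modeled on results 1-4 rather than copied from any file listed here.

```mlir
// Minimal sketch (hypothetical values, not from the files above): both
// arrays are required to be non-empty (ArrayMinCount<1> in the op
// definitions) and describe one iterator component each -- here a single
// scalar i64 component.
func.func @iterator_sketch() -> tensor<!tf_type.resource> {
  %it = "tf.AnonymousIterator"() {
    output_types = [i64],
    output_shapes = [#tf_type.shape<>]
  } : () -> tensor<!tf_type.resource>
  func.return %it : tensor<!tf_type.resource>
}
```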