Results 61 - 70 of 184 for output_types (0.29 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td

          %1 = "tf.ReduceDataset"(%arg0, %arg1) {
            Targuments = [],
            Tstate = [i64], device = "",
            f = @__reduce_func_1, f._tf_data_function = true,
            output_shapes = [#tf_type.shape<>],
            output_types = [i64], use_inter_op_parallelism = true, _xla_compile_device_type="TPU"} :
              (tensor<!tf_type.variant>, tensor<i64>) -> (tensor<i64>)
          func.return
        }
        ```
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 99.6K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/g3doc/_includes/tf_passes.md

      ) {
        %1 = "tf.ReduceDataset"(%arg0, %arg1) {
          Targuments = [],
          Tstate = [i64], device = "",
          f = @__reduce_func_1, f._tf_data_function = true,
          output_shapes = [#tf_type.shape<>],
          output_types = [i64], use_inter_op_parallelism = true, _xla_compile_device_type="TPU"} :
            (tensor<!tf_type.variant>, tensor<i64>) -> (tensor<i64>)
        func.return
      }
      ```
    
     with the following reduction function:
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Aug 02 02:26:39 UTC 2023
    - 96.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td

        Variadic<TF_Tensor>:$init_func_other_args,
        Variadic<TF_Tensor>:$next_func_other_args,
        Variadic<TF_Tensor>:$finalize_func_other_args,
    
        ConfinedAttr<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
        ConfinedAttr<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
        DefaultValuedOptionalAttr<StrAttr, "\"\"">:$metadata
      );
    
      let results = (outs
        TF_VariantTensor:$handle
      );
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 04:08:35 UTC 2024
    - 90.5K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/translate/import_model.cc

            "Placeholder node");
      }
    
      DataType dtype = it->second.imported_dtype;
      // Uses the existing output type if it isn't specified by the user.
      if (dtype == DT_INVALID) {
        dtype = node->attr().at("output_types").list().type(0);
      }
      // Update op name, drop inputs and set attributes required by the Placeholder
      // op.
      *node->mutable_op() = "Placeholder";
      node->clear_attr();
      node->clear_input();
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 183.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/utils/nms_utils.cc

      Value iou_threshold = func_.getArgument(3);
      Value score_threshold = func_.getArgument(4);
      auto output_type0 = func_.getFunctionType().getResult(0);
      auto output_type1 = func_.getFunctionType().getResult(1);
    
      OpBuilder builder(func_.getBody());
      auto op = builder.create<mlir::TFL::NonMaxSuppressionV4Op>(
          func_.getLoc(), output_type0, output_type1, boxes, scores,
          max_output_size, iou_threshold, score_threshold);
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc

      }
      bool changed = false;
      int next_op_result = 0;
      for (auto output_type : main_output_types) {
        if (tensorflow::IsTokenType(output_type)) continue;
        auto output_type_ranked = mlir::dyn_cast<RankedTensorType>(output_type);
        if (output_type_ranked == nullptr) {
          llvm::errs() << "Unsupported XlaCallModule result type: " << output_type
                       << "\n";
          return false;
        }
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 08 07:28:49 UTC 2024
    - 134.1K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/utils/arithmetic_count_util.h

        auto output = op->getResult(0);
        auto output_type =
            mlir::dyn_cast_or_null<mlir::RankedTensorType>(output.getType());
        if (output_type == nullptr || !output_type.hasStaticShape()) return false;
    
        int64_t cols = 1;
        for (int i = 0; i < output_type.getRank() - 1; ++i) {
          cols *= output_type.getDimSize(i);
        }
        const int64_t cost_per_col = 2 * weight_type.getNumElements();
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 3.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc

      auto output = op->getResult(0);
      auto output_type = mlir::dyn_cast_or_null<RankedTensorType>(output.getType());
      if (!output_type) return failure();
    
      // bias should be a vector sized to the last output dim.
      int64_t num_units = output_type.getDimSize(output_type.getRank() - 1);
      auto bias_type =
          mlir::RankedTensorType::get({num_units}, output_type.getElementType());
    
      mlir::DenseElementsAttr bias_attr;
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 25.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

          {"quantized_ops": ["${main_op}", "BiasAdd", "Relu"], "act_func": "internal_dequantize_and_relu_fn", "output_type": "f32"},
          {"quantized_ops": ["${main_op}", "BiasAdd", "Relu6"], "act_func": "internal_dequantize_and_relu6_fn", "output_type": "f32"},
        ]
        func.func @GenerateQuantizedFunctionName(${quantized_ops}, "${output_type}")(%input : tensor<*xi8>,
                               %filter : tensor<*xi8>, %bias : tensor<*xi32>,
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc

            op->getLoc(), *output_type, op.getInput());
        rewriter.replaceOpWithNewOp<mhlo::BitcastConvertOp>(
            op,
            output_type->clone(
                mlir::dyn_cast<quant::QuantizedType>(output_type->getElementType())
                    .getStorageType()),
            result);
    
        return success();
      }
    };
    
    // UniformDequantizeOp takes TF quantized types as input which would have been
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 30.9K bytes
    - Viewed (0)
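
A note on result 3: the ODS declaration constrains $output_types with ConfinedAttr<TypeArrayAttr, [ArrayMinCount<1>]>, so the generated verifier rejects an empty output_types array. The sketch below is a minimal hand-written equivalent, assuming an MLIR build; the function name and diagnostic wording are hypothetical and not taken from the TensorFlow sources.

    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/Operation.h"
    #include "mlir/Support/LogicalResult.h"

    // Hypothetical verifier fragment: what ArrayMinCount<1> on a TypeArrayAttr
    // amounts to when checked by hand.
    static mlir::LogicalResult VerifyOutputTypesAttr(mlir::Operation *op) {
      auto output_types = op->getAttrOfType<mlir::ArrayAttr>("output_types");
      if (!output_types || output_types.size() < 1)
        return op->emitOpError("requires a non-empty 'output_types' array attribute");
      for (mlir::Attribute attr : output_types) {
        // TypeArrayAttr additionally requires every element to be a type.
        if (!llvm::isa<mlir::TypeAttr>(attr))
          return op->emitOpError("'output_types' elements must be type attributes");
      }
      return mlir::success();
    }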
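
A note on result 7: the arithmetic count logic reduces to a simple formula. A fully connected op costs roughly 2 * (number of weight elements) multiply-add operations per output row, summed over every output dimension except the last. The standalone C++ sketch below restates that computation without the MLIR type machinery; the helper name is hypothetical.

    #include <cstdint>
    #include <vector>

    // Hypothetical restatement of the count in result 7:
    //   cols         = product of all output dimensions except the last
    //   cost_per_col = 2 * number of weight elements (one multiply + one add each)
    int64_t FullyConnectedArithmeticCount(const std::vector<int64_t>& output_shape,
                                          int64_t weight_num_elements) {
      int64_t cols = 1;
      for (size_t i = 0; i + 1 < output_shape.size(); ++i) {
        cols *= output_shape[i];
      }
      const int64_t cost_per_col = 2 * weight_num_elements;
      return cols * cost_per_col;
    }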
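
A note on result 8: the pattern derives a 1-D bias type whose length matches the last (units) dimension of the ranked output type. Below is a minimal sketch of that derivation, assuming an MLIR build and using only the RankedTensorType API visible in the excerpt; the helper name is hypothetical.

    #include <cstdint>

    #include "mlir/IR/BuiltinTypes.h"

    // Hypothetical helper: for an output type such as tensor<4x8x16xf32>,
    // build the matching bias type tensor<16xf32> (one element per unit).
    static mlir::RankedTensorType GetBiasTypeForOutput(
        mlir::RankedTensorType output_type) {
      const int64_t num_units = output_type.getDimSize(output_type.getRank() - 1);
      return mlir::RankedTensorType::get({num_units}, output_type.getElementType());
    }

The dyn_cast_or_null guard in the excerpt matters here: an unranked output has no dimension sizes to read, which is why the pattern bails out with failure() before reaching this step.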