Results 1 - 9 of 9 for getElementDtype (0.32 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc

          return failure();
        }
    
        if (!IsI8F32UniformQuantizedPerAxisType(filter_type.getElementType())) {
          LLVM_DEBUG(llvm::dbgs() << "Expected a per-channel uniform quantized "
                                     "(i8->f32) type filter. Got: "
                                  << filter_type.getElementType() << "\n");
          return failure();
        }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 09:00:19 UTC 2024
    - 99.8K bytes
    - Viewed (0)
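
The hits in this listing share one pattern: read the element type off a shaped tensor type with getElementType() and test it with predicates such as isF32() or isInteger(). Below is a minimal standalone sketch of that pattern, assuming an MLIR build environment; it is illustrative code, not taken from the TensorFlow files listed here.

    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/MLIRContext.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      mlir::MLIRContext ctx;

      // Build a tensor<2x3xf32> type by hand.
      auto f32 = mlir::Float32Type::get(&ctx);
      auto tensorTy = mlir::RankedTensorType::get({2, 3}, f32);

      // getElementType() strips the shape and returns the scalar element type,
      // which can then be tested with the usual predicates.
      mlir::Type elemTy = tensorTy.getElementType();
      llvm::outs() << "f32 elements: " << (elemTy.isF32() ? "yes" : "no") << "\n";
      llvm::outs() << "i32 elements: " << (elemTy.isInteger(32) ? "yes" : "no") << "\n";
      return 0;
    }
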
  2. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc

        if (!keys_ty || !keys_ty.hasStaticShape() ||
            !keys_ty.getElementType().isIntOrFloat())
          return rewriter.notifyMatchFailure(
              op,
              "only match for the case where the first operand has a static "
              "int/float shapeType");
        if (!indices_ty || !indices_ty.hasStaticShape() ||
            !indices_ty.getElementType().isInteger(32))
          return rewriter.notifyMatchFailure(
              op,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 154.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/ir/tfl_ops.cc

      const auto output_type = getType(0).cast<ShapedType>();
    
      // Folding only implemented for float tensors.
      if (!input_type.getElementType().isF32() ||
          !weights_type.getElementType().isF32() ||
          !output_type.getElementType().isF32() ||
          (has_bias && !bias_type.getElementType().isF32())) {
        return failure();
      }
    
      // Folding only implemented for static shapes
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 169.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/transforms/optimize.cc

      if (!begin_attr || !size_attr) {
        return false;
      }
    
      auto begin_elem_ty = begin_attr.getType().getElementType();
      if (!begin_elem_ty.isInteger(32) && !begin_elem_ty.isInteger(64)) {
        return false;
      }
      auto size_elem_ty = size_attr.getType().getElementType();
      if (!size_elem_ty.isInteger(32) && !size_elem_ty.isInteger(64)) {
        return false;
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes
    - Viewed (0)
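
The optimize.cc hit above reads the element type off an attribute's shaped type rather than a value's type. A self-contained sketch of that check follows; the helper name IsI32OrI64Elements is made up for illustration and is not a TensorFlow API.

    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/MLIRContext.h"
    #include "llvm/ADT/ArrayRef.h"

    // Illustrative helper: accept only attributes whose elements are i32 or i64,
    // mirroring the begin/size checks in the snippet above.
    static bool IsI32OrI64Elements(mlir::DenseElementsAttr attr) {
      mlir::Type elemTy = attr.getType().getElementType();
      return elemTy.isInteger(32) || elemTy.isInteger(64);
    }

    int main() {
      mlir::MLIRContext ctx;
      auto i32 = mlir::IntegerType::get(&ctx, 32);
      auto ty = mlir::RankedTensorType::get({2}, i32);
      auto attr = mlir::DenseElementsAttr::get(ty, llvm::ArrayRef<int32_t>({0, 4}));
      return IsI32OrI64Elements(attr) ? 0 : 1;
    }
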
  5. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc

      // is up to ~2x faster.
      const bool is_f16 = input_ty.getElementType().isF16();
      if (is_f16 && CanUseTensorCores(devices)) return "NHWC";
    
      // For f32/f16 data type decision depends on the filter size in spatial
      // dimensions, for other data types we keep current data format.
      if (!input_ty.getElementType().isF32() && !input_ty.getElementType().isF16())
        return getDataFormat();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 146.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc

                size.getSExtValue(),
                start.getType().cast<TensorType>().getElementType()),
            start, limit, delta);
      }
      return RangeOp::build(
          builder, result,
          tensorflow::GetTypeFromTFTensorShape(
              {-1}, start.getType().cast<TensorType>().getElementType()),
          start, limit, delta);
    }
    
    OpFoldResult RangeOp::fold(FoldAdaptor adaptor) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 170.8K bytes
    - Viewed (0)
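
The tf_ops_n_z.cc hit above shows the complementary direction: take the element type from one type and reuse it when constructing another. A small sketch of the same idea using only builtin types; GetTypeFromTFTensorShape itself is a TensorFlow helper and is not used here.

    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/MLIRContext.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      mlir::MLIRContext ctx;

      // Stand-in for the `start` operand type: a 0-d tensor<i64>.
      auto startTy =
          mlir::RankedTensorType::get({}, mlir::IntegerType::get(&ctx, 64));

      // Reuse the element type to build a 1-D tensor with a dynamic dimension,
      // i.e. tensor<?xi64>, much like the {-1} shape in the snippet above.
      auto resultTy = mlir::RankedTensorType::get({mlir::ShapedType::kDynamic},
                                                  startTy.getElementType());
      resultTy.print(llvm::outs());
      llvm::outs() << "\n";
      return 0;
    }
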
  7. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc

          mlir::dyn_cast<TensorFlowTypeWithSubtype>(operand_type.getElementType());
      if (!operand_handle_type) return result_type.getElementType();
      auto result_handle_type =
          mlir::cast<TensorFlowTypeWithSubtype>(result_type.getElementType());
      if (operand_handle_type.GetSubtypes().empty() ||
          !result_handle_type.GetSubtypes().empty())
        return result_type.getElementType();
      return operand_handle_type;
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 08 07:28:49 UTC 2024
    - 134.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/flatbuffer_export.cc

        } else {
          return std::nullopt;
        }
      }
    
      auto element_type = tensor_type.getElementType();
      tflite::TensorType tflite_element_type =
          GetTFLiteType(tensor_type.getElementType()).value();
      std::optional<std::vector<BufferOffset<tflite::VariantSubType>>>
          variant_params = BuildTFVariantType(element_type);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:41:49 UTC 2024
    - 164.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

      DerivedTFLiteTypeAttr output_type = DerivedTFLiteTypeAttr<[{
        return getResult().getType().cast<TensorType>().getElementType().
            cast<IntegerType>().getWidth() > 32 ? tflite::TensorType_INT64 :
                tflite::TensorType_INT32;
        }], [{
          TypeAttr::get(getResult().getType().cast<TensorType>().getElementType())
        }]>;
    }
    
    def TFL_ArgMinOp : TFL_Op<"arg_min", [
        QuantizableResult,
        Pure]> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
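
The tfl_ops.td hit above branches on the integer width of the result's element type. A hypothetical C++ sketch of the same decision; FakeTensorType is a placeholder for the real tflite::TensorType enum.

    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/MLIRContext.h"

    enum class FakeTensorType { kInt32, kInt64 };  // placeholder for tflite::TensorType

    // Pick a 64-bit index type only when the element type is wider than 32 bits,
    // mirroring the DerivedTFLiteTypeAttr body in the snippet above.
    static FakeTensorType PickIndexType(mlir::TensorType resultTy) {
      auto intTy = mlir::cast<mlir::IntegerType>(resultTy.getElementType());
      return intTy.getWidth() > 32 ? FakeTensorType::kInt64 : FakeTensorType::kInt32;
    }

    int main() {
      mlir::MLIRContext ctx;
      auto i64 = mlir::IntegerType::get(&ctx, 64);
      auto ty = mlir::RankedTensorType::get({3}, i64);
      return PickIndexType(ty) == FakeTensorType::kInt64 ? 0 : 1;
    }
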