- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 16 for getF32 (0.14 sec)
-
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc
return UniformQuantizedType::getChecked( loc, /*flags=*/QuantizationFlags::Signed, /*storageType=*/IntegerType::get(&context, /*width=*/8), /*expressedType=*/FloatType::getF32(&context), scale, zero_point, /*storageTypeMin=*/llvm::minIntN(8) + (narrow_range ? 1 : 0), /*storageTypeMax=*/llvm::maxIntN(8)); } UniformQuantizedType CreateI32F32UniformQuantizedType(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils_test.cc
{2, 2}, quant::UniformQuantizedType::get( quant::QuantizationFlags::FlagValue::Signed, IntegerType::get(context.get(), 8), FloatType::getF32(context.get()), 3.0, 2, -128, 127)); auto dense_attr = GetDenseAttrFromTensorProtoAttr(GetQint8Tensor(), result_tensor_type); ASSERT_TRUE(succeeded(dense_attr));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/convert_tensor_test.cc
mlir::FloatType::getBF16(&context))); ASSERT_NO_FATAL_FAILURE(VerifyConversion<float>( {1.0, -1.0}, DT_FLOAT, mlir::FloatType::getF32(&context))); ASSERT_NO_FATAL_FAILURE(VerifyConversion<double>( {1.0, -1.0}, DT_DOUBLE, mlir::FloatType::getF64(&context))); ASSERT_NO_FATAL_FAILURE(VerifyConversion<tsl::float8_e5m2>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
return APInt(/*numBits=*/32, real_int); }; auto dequant_values = mlir::cast<DenseIntOrFPElementsAttr>(input_values) .mapValues(FloatType::getF32(rewriter.getContext()), llvm::function_ref<DequantizeFuncType>(dequantize_func)); rewriter.replaceOpWithNewOp<TFL::ConstOp>(dequant_op, dequant_op.getType(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc
Type tensor_type_f32; if (tensor_type.hasRank()) { tensor_type_f32 = tensorflow::GetTypeFromTFTensorShape( tensor_type.getShape(), FloatType::getF32(context)); } else { tensor_type_f32 = UnrankedTensorType::get(FloatType::getF32(context)); } // Add cast to f32 to conform with element type of result. operand = rewriter.create<CastOp>(op.getLoc(), tensor_type_f32, operand);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc
// accumulation over the given input type. Type GetSumAccumulationType(Type input_type) { MLIRContext *ctx = input_type.getContext(); if (input_type.isBF16() || input_type.isF16()) return FloatType::getF32(ctx); if (input_type.isSignlessInteger(8) || input_type.isSignlessInteger(16)) return IntegerType::get(ctx, 32); return input_type; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 20:00:43 UTC 2024 - 291.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
auto new_result_types = llvm::to_vector<6>(tf_fused_batch_norm_op.getResultTypes()); // reserve_space_3 new_result_types.push_back( UnrankedTensorType::get(FloatType::getF32(rewriter.getContext()))); auto tf_fused_batch_norm_op_v3 = CreateTfOp<TF::FusedBatchNormV3Op>( rewriter, tf_fused_batch_norm_op, new_result_types, tf_fused_batch_norm_op.getOperands(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 170.8K bytes - Viewed (0) -
src/time/zoneinfo_read.go
// 42 off[4] // 46 name[namelen] // 46+namelen+xlen+fclen - next header // if get4(buf) != zcheader { break } meth := get2(buf[10:]) size := get4(buf[24:]) namelen := get2(buf[28:]) xlen := get2(buf[30:]) fclen := get2(buf[32:]) off := get4(buf[42:]) zname := buf[46 : 46+namelen] buf = buf[46+namelen+xlen+fclen:] if string(zname) != name { continue }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 19:57:43 UTC 2024 - 14.4K bytes - Viewed (0) -
src/cmd/link/internal/ld/macho.go
var linkeditSeg, textSeg *macho.Segment loadOff := int64(machoHeaderSize64) get32 := mf.ByteOrder.Uint32 for _, l := range mf.Loads { data := l.Raw() cmd, sz := get32(data), get32(data[4:]) if cmd == LC_CODE_SIGNATURE { sigOff = int64(get32(data[8:])) sigSz = int64(get32(data[12:])) csCmdOff = loadOff } if seg, ok := l.(*macho.Segment); ok { switch seg.Name {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 20 15:32:53 UTC 2024 - 43.9K bytes - Viewed (0) -
src/runtime/atomic_pointer.go
// See go.dev/issue/67401. // //go:linkname atomicwb //go:nosplit func atomicwb(ptr *unsafe.Pointer, new unsafe.Pointer) { slot := (*uintptr)(unsafe.Pointer(ptr)) buf := getg().m.p.ptr().wbBuf.get2() buf[0] = *slot buf[1] = uintptr(new) } // atomicstorep performs *ptr = new atomically and invokes a write barrier. // //go:nosplit func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 4K bytes - Viewed (0)