- Sort Score
- Result 10 results
- Languages All
Results 11 - 20 of 200 for scast (0.06 sec)
-
platforms/core-runtime/stdlib-java-extensions/src/main/java/org/gradle/internal/Cast.java
* * @param outputType The type to cast the input to * @param object The object to be cast (must not be {@code null}) * @param <O> The type to be cast to * @param <I> The type of the object to be cast * @return The input object, cast to the output type */ public static <O, I> O cast(Class<O> outputType, I object) { try {
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon Jun 10 14:28:48 UTC 2024 - 3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td
]; } def PrepareQuantizePass : Pass<"stablehlo-prepare-quantize", "mlir::ModuleOp"> { let summary = "Prepare StableHLO dialect for static range quantization by converting quantfork.stats into quantfork.qcast and dcast ops."; let options = [ Option<"enable_per_channel_quantized_weight_", "enable-per-channel-quantized-weight", "bool", /*default=*/"true", "Whether to enable per-channel quantized weights.">,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 10.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir
%1 = "quantfork.qcast"(%0) {volatile} : (tensor<2x3xf32>) -> tensor<2x3x!quant.uniform<i8<-127:127>:f32, 5.000000e-03>> %2 = "quantfork.dcast"(%1) : (tensor<2x3x!quant.uniform<i8<-127:127>:f32, 5.000000e-03>>) -> tensor<2x3xf32> %3 = "quantfork.qcast"(%arg0) {volatile} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 6.000000e-03:-128>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 01:38:40 UTC 2024 - 6.3K bytes - Viewed (0) -
src/cmd/vendor/github.com/ianlancetaylor/demangle/ast.go
tpp.Param.goString(indent+2, "Param: ")) } // Cast is a type cast. type Cast struct { To AST } func (c *Cast) print(ps *printState) { ps.writeString("operator ") ps.print(c.To) } func (c *Cast) Traverse(fn func(AST) bool) { if fn(c) { c.To.Traverse(fn) } } func (c *Cast) Copy(fn func(AST) AST, skip func(AST) bool) AST { if skip(c) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 31 19:48:28 UTC 2024 - 105.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/post_quantize.mlir
// CHECK-NOT: "quantfork.qcast" // CHECK-NOT: "quantfork.dcast" // CHECK: return %[[CST]] %cst = stablehlo.constant dense<[[-0.960978984, -0.390246302], [-0.790828585, -0.601039409], [-1.0280807, -1.02731466]]> : tensor<3x2xf32> %q = "quantfork.qcast"(%cst) {volatile} : (tensor<3x2xf32>) -> tensor<3x2x!quant.uniform<i8:f32, 0.013075299590241675:-64>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 4.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.cc
b.setInsertionPoint(op); if (auto dq = llvm::dyn_cast<DequantizeOp>(op)) { auto dcast = b.create<quantfork::DequantizeCastOp>( dq.getLoc(), dq.getOutput().getType(), dq.getInput()); dq.getOutput().replaceAllUsesWith(dcast); dq.erase(); } else if (auto q = llvm::dyn_cast<QuantizeOp>(op)) { auto qcast = b.create<quantfork::QuantizeCastOp>( q.getLoc(), q.getOutput().getType(), q.getInput());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 02:50:01 UTC 2024 - 3.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc
std::vector<Value> sliced_lhs = sliceInput(input_lhs, bcast.x_batch_size(), loc, rewriter); std::vector<Value> sliced_rhs = sliceInput(input_rhs, bcast.y_batch_size(), loc, rewriter); // Compute (single batch) MatMul for each output batch. std::vector<Value> matmuls; matmuls.reserve(bcast.output_batch_size()); for (int batch_idx : llvm::seq<int>(0, bcast.output_batch_size())) { int lhs_batch_idx, rhs_batch_idx;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.6K bytes - Viewed (0) -
src/cmd/vendor/github.com/ianlancetaylor/demangle/demangle.go
if cast != nil { st.setTemplate(cast, tmpl) st.clearTemplateArgs(args) cast = nil } a = nil next = tmpl case 'T': next = st.templateParam() case 'E': if a == nil { st.fail("expected prefix") } if cast != nil { var toTmpl *Template if castTempl, ok := cast.To.(*Template); ok { toTmpl = castTempl
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 31 19:48:28 UTC 2024 - 94.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_drq.mlir
return %0 : tensor<*xf32> } // CHECK: %[[cst:.*]] = "arith.constant"() <{value = dense<0.000000e+00> : tensor<2x1024xf32>}> : () -> tensor<2x1024xf32> // CHECK: %[[q_cst:.*]] = "quantfork.qcast"(%[[cst]]) : (tensor<2x1024xf32>) -> tensor<2x1024x!quant.uniform<i8<-127:127>:f32, 3.9370078740157481E-9>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 1.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
// TODO(b/204265523): Removes this pass after the exporting MLIR to SavedModel // path is available. std::unique_ptr<OperationPass<ModuleOp>> CreateInsertMainFunctionPass(); // Converts FakeQuant ops to quant.qcast and quant.dcast (QDQ) pairs. std::unique_ptr<OperationPass<func::FuncOp>> CreateConvertFakeQuantToQdqPass(); // Lifts the quantizable spots as composite functions. std::unique_ptr<OperationPass<ModuleOp>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0)