- Sort Score
- Results per page: 10
- Languages All
Results 41 - 50 of 50 for constop (0.12 sec)
-
platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
} >&2 # OS specific support (must be 'true' or 'false'). cygwin=false msys=false darwin=false nonstop=false case "\$( uname )" in #( CYGWIN* ) cygwin=true ;; #( Darwin* ) darwin=true ;; #( MSYS* | MINGW* ) msys=true ;; #( NONSTOP* ) nonstop=true ;; esac CLASSPATH=$classpath <% if ( mainClassName.startsWith('--module ') ) { %> MODULE_PATH=$modulePath
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Tue Apr 23 13:43:33 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/fold_broadcast_pass.cc
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" namespace mlir { namespace odml { static const APFloat &addSign(const APFloat &v, Type) { return v; } static APSInt addSign(const APInt &v, Type t) { // Add signedness information to the value, treating signless as signed, // unless it's i1. return APSInt(v, t.isUnsignedInteger() || t.isSignlessInteger(1)); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
PatternRewriter& rewriter) const override { Operation* def_op = op.getInput().getDefiningOp(); auto qconst_op = llvm::dyn_cast_or_null<QConstOp>(def_op); if (qconst_op == nullptr) return failure(); auto dense_elements = mlir::dyn_cast_or_null<DenseElementsAttr>(qconst_op.getValue()); if (dense_elements == nullptr) return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
} return true; } return false; } // Insert CastOp which is used to for converting float32 ConstantOp into // float16 quantization. If there is an existing CastOp connected to the // ConstantOp, the quantize_op will be rewired to the existing CastOp. This // guarantees at most one CastOp is created for float32 to float16 conversion.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir
%2 = stablehlo.add %1, %0 : tensor<1x4x3x3xf32> return %2 : tensor<1x4x3x3xf32> } // CHECK-SAME: (%[[ARG_0:.+]]: tensor<1x3x3x4xf32>) -> tensor<1x4x3x3xf32> // CHECK-DAG: %[[CONST_0:.+]] = stablehlo.constant // CHECK-DAG: %[[TRANSPOSE_0:.+]] = stablehlo.transpose %[[CONST_0]], dims = [0, 2, 3, 1] : (tensor<1x4x3x3xf32>) -> tensor<1x3x3x4xf32> // Check that the shape of the add is changed to reflect the deferred transpose.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 18 20:32:46 UTC 2024 - 14.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/quantize.cc
return std::make_unique<QuantizePass>(); } std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass( const bool verify_numeric, const bool whole_model_verify, const bool legacy_float_scale, const absl::flat_hash_set<std::string>& ops_blocklist, const absl::flat_hash_set<std::string>& nodes_blocklist) { quant::QuantizationSpecs quant_specs; quant_specs.verify_numeric = verify_numeric;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/while_loop_outline.cc
} else { if (IsCompatibleTypeWithTFLCastOp(value.getType()) && IsCompatibleTypeWithTFLCastOp(type)) { auto cast = b.create<CastOp>(yield_op->getLoc(), type, value); args.push_back(cast); } else { auto cast = b.create<TF::CastOp>(yield_op->getLoc(), type, value); args.push_back(cast); } } } args.append(new_args.begin(), new_args.end());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
// the node that saves the variables, after the ModuleOp has been exported as // GraphDef. std::unique_ptr<OperationPass<ModuleOp>> CreateMergeSaveFunctionOpsToMainPass(); // Creates a pass that "unfreezes" ConstOps into variables. Each ConstOp's use // will be replaced by a VarHandleOp -> ReadVariableOp pattern. The newly // created variables will be initialized in the session initializer function via // AssignVariableOps.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/genericOps.go
{name: "ConstNil", typ: "BytePtr"}, // nil pointer {name: "Const8", aux: "Int8"}, // auxint is sign-extended 8 bits {name: "Const16", aux: "Int16"}, // auxint is sign-extended 16 bits {name: "Const32", aux: "Int32"}, // auxint is sign-extended 32 bits // Note: ConstX are sign-extended even when the type of the value is unsigned.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 15:49:20 UTC 2024 - 42.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc
for (int i : llvm::seq<int>(1, num_dims - 1)) { Value input_size_i = GetDimValue(builder, loc, input_shape_value, i); const int stride_i = mlir::cast<IntegerAttr>(strides[i]).getInt(); const int dilation_i = mlir::cast<IntegerAttr>(dilations[i]).getInt(); const int filter_i = filter_shape.getDimSize(i - 1); Value pad_i_low, pad_i_high; GetSamePaddingValues(builder, loc, input_size_i, filter_i, dilation_i,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0)