- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 43 for Dadd (0.08 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/tests/components/tf_to_stablehlo.mlir
// CHECK: %[[BROADCAST_1:.*]] = stablehlo.broadcast_in_dim %[[CONST_1]], dims = [3] : (tensor<8xf32>) -> tensor<1x1x2x8xf32> // CHECK: %[[ADD:.*]] = stablehlo.add %[[MUL]], %[[BROADCAST_1]] : tensor<1x1x2x8xf32> // CHECK: return %[[ADD]] : tensor<1x1x2x8xf32> // ----- func.func @fuse_conv_batchnorm(%arg_0: tensor<1x3x4x3xf32>) -> (tensor<1x3x2x2xf32>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 20:05:12 UTC 2024 - 13.6K bytes - Viewed (0) -
tensorflow/c/while_loop_test.cc
params_->cond_output = {less_than, 0}; TF_Operation* one = ScalarConst(1, params_->body_graph, s_); TF_Operation* add = Add(params_->body_inputs[0], {one, 0}, params_->body_graph, s_); ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_); params_->body_outputs[0] = {add, 0}; ExpectOK(); // Create backprop graph TF_Output grad_output;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 11 06:05:56 UTC 2024 - 15.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/unfuse_mhlo_batch_norm.mlir
%mean: tensor<256xf32>, %variance: tensor<256xf32>) -> (tensor<4x256xf32>) { // CHECK-DAG: %[[EPS_BCAST:.+]] = mhlo.constant dense<1.001000e-05> : tensor<256xf32> // CHECK-DAG: %[[VARIANCE_EPS:.+]] = mhlo.add %[[VARIANCE]], %[[EPS_BCAST]] : tensor<256xf32> // CHECK-DAG: %[[VARIANCE_EPS_RSQRT:.+]] = mhlo.rsqrt %[[VARIANCE_EPS]] : tensor<256xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 10.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/decompose_optionals.cc
RewritePatternSet pattern_list(&getContext()); pattern_list.add<HandleOptionalFrom>(&getContext()); pattern_list.add<HandleOptionalGet>(&getContext()); pattern_list.add<HandleOptionalNone>(&getContext()); pattern_list.add<HandleFunc>(&getContext()); pattern_list.add<HandleCall>(&getContext()); pattern_list.add<HandleIf>(&getContext()); FrozenRewritePatternSet patterns(std::move(pattern_list));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/tfl_while_outline.mlir
%1 = tfl.add %arg1, %arg1 {fused_activation_function = "NONE"} : tensor<*xf32> func.return %0, %1, %arg2 : tensor<*xi32>, tensor<*xf32>, tensor<i32> } // CHECK-LABEL: func private @WhileOp_cond( // CHECK: tfl.greater // CHECK-LABEL: func private @WhileOp_body( // CHECK: tfl.sub // CHECK: tfl.add // -----
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 13.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/const-fold.mlir
// CHECK: %0 = tfl.add %[[CST]], %[[CST_0]] {fused_activation_function = "SIGN_BIT"} : tensor<4xf32> %5 = "tfl.add"(%0, %1) {fused_activation_function = "NONE"} : (tensor<f32>, tensor<f32>) -> tensor<f32> %6 = "tfl.add"(%0, %3) {fused_activation_function = "NONE"} : (tensor<f32>, tensor<4xf32>) -> tensor<4xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 45.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
pass_manager.addNestedPass<mlir::func::FuncOp>( mlir::TFL::CreatePostQuantizePass(emit_quant_adaptor_ops)); } pass_manager.addNestedPass<mlir::func::FuncOp>( mlir::TFL::CreateOptimizeOpOrderPass()); // Add optimization pass after quantization for additional fusing // opportunities. if (!pass_config.unfold_batch_matmul) { // Enable an optimization pass that transforms FC to BatchMatmul only when
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
if (quant_specs_.post_training_quantization) { patterns_1.add<PrepareLstmOutputScale<LSTMOp>>(ctx); patterns_1.add<PrepareLstmOutputScale<UnidirectionalSequenceLSTMOp>>(ctx); } if (is_qdq_conversion_ || quant_specs_.qdq_conversion_mode != quant::QDQConversionMode::kQDQNone) { patterns_1.add<PropagateTransposedPerAxisQuantDim>(ctx); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc
// An unknown side effect dominates other side effects so we don't have // to add them and can return here. return; } // Add op-based side effects from regions (if any). for (Region& region : op->getRegions()) { AddRegionSideEffectsForOp(region, op); } // Add op-based side effects for the op itself. for (const auto& effect : effects) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 15 09:04:13 UTC 2024 - 41.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
const bool enable_per_channel_quantized_weight) { patterns.add<XlaCallModuleOpToCallOp<QuantizeConvolutionOpPattern>>( ctx, enable_per_channel_quantized_weight); patterns.add<XlaCallModuleOpToCallOp<QuantizeDotGeneralOpPattern>>( ctx, enable_per_channel_quantized_weight); patterns .add<XlaCallModuleOpToCallOp<QuantizeWeightOnlyOpPattern<ConvolutionOp>>>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0)