- Sort Score
- Result 10 results
- Languages All
Results 51 - 60 of 82 for conv2 (0.06 sec)
-
subprojects/diagnostics/src/integTest/groovy/org/gradle/api/tasks/diagnostics/DependencyReportTaskIntegrationTest.groovy
configurations { conf1 conf2 } dependencies { conf1 'org:toplevel1:1.0' conf2 'org:toplevel2:1.0' } """ when: run "dependencies", "--configuration", "conf2" then: output.contains """ conf2 \\--- org:toplevel2:1.0 +--- org:leaf3:1.0
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Wed Oct 25 05:32:54 UTC 2023 - 31.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantize_preprocess.cc
mlir::mhlo::createLegalizeDotToDotGeneralPass()); // Unfuse mhlo BatchNorm to primitive ops. pm.addNestedPass<mlir::func::FuncOp>(mlir::odml::createUnfuseBatchNormPass()); // Fuse Conv + Mul to Conv. pm.addNestedPass<mlir::func::FuncOp>(mlir::odml::createFuseConvolutionPass()); // Fold broadcast_in_dim + Mul. pm.addNestedPass<mlir::func::FuncOp>(mlir::odml::createFoldBroadcastPass());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 12:49:45 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/README.md
In this pass, every op will be targeted with the user specified targets based on the device capabilities. For example, if the user specified the desired targets are "GPU", "CPU", `conv2d` can run on both "GPU" and "CPU", we will annotate the op `conv2d` with "GPU" since it's preferred; `pack` can only run on "CPU", so we will annotate the op with "CPU" since "GPU" does not support this op. #### Raise Target Subgraphs Pass
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 29 18:32:13 UTC 2022 - 11.6K bytes - Viewed (0) -
tensorflow/cc/gradients/nn_grad.cc
op.input(2), strides, padding, filter_attrs)); Conv2D::Attrs conv_attrs; conv_attrs.use_cudnn_on_gpu_ = use_cudnn_on_gpu; conv_attrs.explicit_paddings_ = explicit_paddings; conv_attrs.data_format_ = data_format; conv_attrs.dilations_ = dilations; grad_outputs->push_back( Conv2D(scope, grad_inputs[0], op.input(1), strides, padding, conv_attrs)); return scope.status(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 27 23:34:33 UTC 2022 - 24.5K bytes - Viewed (0) -
tensorflow/c/experimental/grappler/grappler_test.cc
ASSERT_EQ(status.message(), "'optimize_func' field in TP_Optimizer must be set."); } TEST(TF_GrapplerItem, NodesToPreserve) { GrapplerItem item; item.fetch = std::vector<string>{"Conv", "BiasAdd"}; std::unordered_set<string> nodes_preserved = item.NodesToPreserve(); TF_GrapplerItem* c_item = reinterpret_cast<TF_GrapplerItem*>(&item); int list_total_size = 0;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 13 22:30:58 UTC 2023 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_space_to_depth_pass.mlir
%5 = "tf.Pad"(%arg0, %3) : (tensor<2x224x224x3xf32>, tensor<4x2xi32>) -> tensor<2x230x230x3xf32> // CHECK: "tf.Conv2D" // CHECK-SAME: strides = [1, 1, 1, 1] // CHECK-SAME: (tensor<2x115x115x12xf32>, tensor<4x4x12x64xf32>) -> tensor<2x112x112x64xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 37.4K bytes - Viewed (0) -
src/cmd/compile/internal/walk/expr.go
if types.IsComplex[et] && n.Op() == ir.ODIV { t := n.Type() call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, typecheck.Conv(n.X, types.Types[types.TCOMPLEX128]), typecheck.Conv(n.Y, types.Types[types.TCOMPLEX128])) return typecheck.Conv(call, t) } // Nothing to do for float divisions. if types.IsFloat[et] { return n } // rewrite 64-bit div and mod on 32-bit architectures.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:34:01 UTC 2024 - 27.6K bytes - Viewed (0) -
test/live.go
// issue 7205 var i9 interface{} func f9() bool { g8() x := i9 y := interface{}(g18()) // ERROR "live at call to convT: x.data$" "live at call to g18: x.data$" "stack object .autotmp_[0-9]+ \[2\]string$" i9 = y // make y escape so the line above has to call convT return x != y } // liveness formerly confused by UNDEF followed by RET, // leading to "live at entry to f10: ~r1" (unnamed result).
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Dec 05 20:34:30 UTC 2023 - 18K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/prepare-tf-fake-quant-4bit.mlir
// CHECK: %[[DEQUANTIZE:.*]] = "tfl.dequantize"(%[[QUANTIZE]]) // CHECK: %[[CONV:.*]] = "tfl.conv_2d"(%arg0, %[[DEQUANTIZE]], %[[CONSTANT]]) // CHECK: return %[[CONV]] } // CHECK-LABEL: perChannelFakeQuantWithConv2D func.func @perChannelFakeQuantWithConv2D(tensor<256x32x32x3xf32>) -> (tensor<256x8x7x16xf32>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 22K bytes - Viewed (0) -
tensorflow/cc/gradients/nn_grad_test.cc
#include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/random/random.h" namespace tensorflow { namespace { using ops::AvgPool; using ops::AvgPool3D; using ops::BiasAdd; using ops::Conv2D; using ops::Conv2DBackpropInput; using ops::DepthwiseConv2dNative; using ops::Elu; using ops::FractionalAvgPool; using ops::FractionalMaxPool; using ops::FusedBatchNormV3; using ops::L2Loss; using ops::LogSoftmax;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 22 20:45:22 UTC 2022 - 15K bytes - Viewed (0)