Results 11 - 20 of 36 for AddN (0.16 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

      // CHECK: [[ADD_N:%.*]] = "tf.AddN"(%arg0, [[ZERO]], [[ONE]])
      // CHECK: return %arg0, %arg0, [[ZERO]], [[ADD_N]]
      %2 = "tf.AddN"(%arg0, %1, %1) : (tensor<2xf32>, tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
      %3 = "tf.AddN"(%1, %arg0, %1) : (tensor<2xf32>, tensor<2xf32> , tensor<2xf32>) -> tensor<2xf32>
      %4 = "tf.AddN"(%1, %1) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
    - Viewed (0)
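    The FileCheck test above exercises canonicalization of tf.AddN when some operands are known constants. Those rewrites lean on AddN being a pure element-wise sum, so permuting the operand list cannot change the result. A minimal eager-mode sketch of that property (the tensor values below are illustrative, not taken from the test):

      import tensorflow as tf

      x = tf.constant([1.0, 2.0])
      zero = tf.zeros_like(x)   # stand-in for the [[ZERO]] constant in the test
      one = tf.ones_like(x)     # stand-in for the [[ONE]] constant in the test

      # AddN sums all operands element-wise, so reordering operands
      # (as %2 and %3 in the test do) yields the same value.
      a = tf.add_n([x, zero, one])
      b = tf.add_n([one, x, zero])
      print(bool(tf.reduce_all(a == b)))  # True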
  2. tensorflow/compiler/mlir/tfr/examples/pad/ops_defs.py

              concat_dim=i, values=[reversed_left_padding, left_offset])
          from_right_padding = tf.raw_ops.Concat(
              concat_dim=i, values=[right_offset, reversed_right_padding])
        input_ = tf.raw_ops.AddN(
            inputs=[from_left_padding, core, from_right_padding])
    
      return input_
    
    
    @tf.RegisterGradient('NewMirrorPadGrad')
    def _mirror_pad_grad_grad(op, grad):
      mode = op.get_attr('mode')
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Oct 01 05:00:29 UTC 2021
    - 5.6K bytes
    - Viewed (0)
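    In the composite gradient above, tf.raw_ops.AddN folds the reflected left padding, the core slice, and the reflected right padding into a single tensor. A standalone sketch of that call (the values below are made up; the real inputs come from the surrounding ops in ops_defs.py):

      import tensorflow as tf

      # Hypothetical stand-ins for from_left_padding, core and
      # from_right_padding: tensors of identical shape and dtype,
      # which is what tf.raw_ops.AddN requires.
      left = tf.constant([[1.0, 0.0, 0.0]])
      core = tf.constant([[0.0, 5.0, 0.0]])
      right = tf.constant([[0.0, 0.0, 2.0]])

      out = tf.raw_ops.AddN(inputs=[left, core, right])
      print(out.numpy())  # [[1. 5. 2.]] -- element-wise sum of all inputs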
  3. tensorflow/c/kernels_experimental.h

    TF_CAPI_EXPORT extern bool TF_IsRefInput(TF_OpKernelContext* ctx, int i,
                                             TF_Status* status);
    
    #ifndef IS_MOBILE_PLATFORM
    // Expose higher level AddN operation for Pluggable vendors to implement
    // in the plugin for Variant data types. The API takes in the context and a
    // callback provided by pluggable vendor to do a Binary Add operation on the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Aug 07 14:44:39 UTC 2023
    - 9.4K bytes
    - Viewed (0)
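    The header comment above describes an experimental C API through which a pluggable device can support AddN on Variant tensors by supplying only a pairwise (binary) add callback; the runtime then folds all N inputs with that callback. A conceptual Python sketch of that folding, not the C API itself (the helper name and callback are invented):

      from functools import reduce

      def add_n_with_callback(inputs, binary_add):
          """Fold N operands with a caller-supplied binary add, mirroring
          how the C API only asks the plugin for pairwise Variant addition."""
          if not inputs:
              raise ValueError("AddN requires at least one input")
          return reduce(binary_add, inputs)

      # Trivial usage with Python ints standing in for Variant values:
      print(add_n_with_callback([1, 2, 3, 4], lambda a, b: a + b))  # 10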
  4. tensorflow/compiler/jit/shape_inference_test.cc

      auto c = ops::Placeholder(root.WithOpName("C"), DT_FLOAT);
      auto d = ops::Add(root.WithOpName("D"), a, b);
      auto e = ops::Add(root.WithOpName("E"), d, c);
      auto f = ops::Neg(root.WithOpName("F"), e);
      auto g = ops::AddN(root.WithOpName("G"), std::initializer_list<Output>{e, f});
    
      std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
      TF_CHECK_OK(root.ToGraph(graph.get()));
    
      GraphShapeInfo shape_info;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 31 00:41:19 UTC 2024
    - 10.3K bytes
    - Viewed (0)
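    The C++ test above builds a small graph that ends in ops::AddN and checks the statically inferred shapes. The same behaviour can be observed from Python by tracing an equivalent function: AddN's inferred output shape is the common shape of its inputs. A sketch (the [2, 3] signature is arbitrary, not taken from the test):

      import tensorflow as tf

      @tf.function
      def graph_fn(a, b, c):
          d = a + b
          e = d + c
          f = -e
          return tf.add_n([e, f])   # mirrors ops::AddN(..., {e, f}) above

      spec = tf.TensorSpec([2, 3], tf.float32)
      concrete = graph_fn.get_concrete_function(spec, spec, spec)
      # The traced graph already carries the inferred shape of the AddN result.
      print(concrete.outputs[0].shape)  # (2, 3)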
  5. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

      return rewriter.create<ConcatV2Op>(loc, type, ValueRange(vals), axis);
    }
    
    // Lowers AddN op to a sequence of AddV2 ops to accumulate operands.
    //
    // Note that to improve the parallelism, AddN op uses tree-based reduction.
    // For example, tf.AddN([0, 1, 2, 3, 4]) behaves as follows:
    //
    //                 0     1     2     3     4
    //                 |     |     |     |     |
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
    - Viewed (0)
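    The lowering comment above explains that AddN is expanded into a tree of AddV2 ops so that additions at the same depth can run in parallel. A minimal Python sketch of that pairwise, level-by-level reduction (the helper name is invented for illustration):

      def tree_add_n(operands, add=lambda a, b: a + b):
          """Reduce operands pairwise, level by level, like the AddN lowering."""
          assert operands, "AddN needs at least one operand"
          while len(operands) > 1:
              next_level = []
              # Pair up neighbours; a leftover odd operand is carried forward.
              for i in range(0, len(operands) - 1, 2):
                  next_level.append(add(operands[i], operands[i + 1]))
              if len(operands) % 2 == 1:
                  next_level.append(operands[-1])
              operands = next_level
          return operands[0]

      print(tree_add_n([0, 1, 2, 3, 4]))  # 10, computed as ((0+1) + (2+3)) + 4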
  6. tensorflow/compiler/mlir/tfrt/tests/mlrt/tf_to_mlrt.mlir

      // CHECK: [[d:%.*]] = tf_mlrt.await [[futures]]#2
      %d = "tf_mlrt.tf_await"(%future_d) : (!mlrt.future) ->tensor<i32>
    
      // CHECK: [[result:%.*]] = tf_mlrt.executeop([[b]], [[d]], [[f]])
      // CHECK-SAME: AddN
      %result = "tf.AddN"(%b, %d, %f) {__op_key = 9: i32}: (tensor<i32>, tensor<i32>, tensor<i32>) -> tensor<i32>
    
      // CHECK: mlrt.await_handle [[handle_0]]
      // CHECK: mlrt.await_handle [[handle_1]]
      mlrt.await_handle %handle_0
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 31 20:44:15 UTC 2024
    - 24.7K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/legalize-tensorlist.mlir

    }
    
    // -----
    
    // CHECK-LABEL: variantAddN
    func.func @variantAddN(%arg0: tensor<!tf_type.variant<tensor<*xi32>>>, %arg1: tensor<!tf_type.variant<tensor<*xi32>>>) -> tensor<!tf_type.variant<tensor<*xi32>>> {
      %1 = "tf.AddN"(%arg0, %arg1) : (tensor<!tf_type.variant<tensor<*xi32>>>, tensor<!tf_type.variant<tensor<*xi32>>>) -> tensor<!tf_type.variant<tensor<*xi32>>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 9.5K bytes
    - Viewed (0)
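    The legalization test above covers tf.AddN on !tf_type.variant operands, i.e. adding two TensorLists element-wise. A sketch of the same operation at the Python level, assuming eager execution (the values are illustrative):

      import tensorflow as tf

      t = tf.constant([[1.0, 2.0], [3.0, 4.0]])
      # Two variant tensors, each holding a TensorList with [2]-shaped elements.
      l1 = tf.raw_ops.TensorListFromTensor(tensor=t, element_shape=[2])
      l2 = tf.raw_ops.TensorListFromTensor(tensor=t, element_shape=[2])

      # Variant AddN adds the two lists element-wise into a new TensorList.
      summed = tf.raw_ops.AddN(inputs=[l1, l2])
      print(tf.raw_ops.TensorListStack(
          input_handle=summed, element_shape=[2], element_dtype=tf.float32))
      # [[2. 4.] [6. 8.]]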
  8. tensorflow/cc/gradients/math_grad_test.cc

      RunTest({x}, {x_shape}, {y}, {y_shape});
    }
    
    TEST_F(NaryGradTest, AddN) {
      TensorShape shape({3, 2, 5});
      std::vector<Output> xs;
      xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)));
      xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)));
      xs.push_back(Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape)));
      auto y = AddN(scope_, xs);
      RunTest(xs, {shape, shape, shape}, {y}, {shape});
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 36K bytes
    - Viewed (0)
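    The gradient test above checks AddN's n-ary gradient numerically. Because AddN is a plain sum, the gradient with respect to every input is just the incoming gradient, which a quick tape check illustrates (a sketch; the shape follows the test's {3, 2, 5}):

      import tensorflow as tf

      shape = (3, 2, 5)
      xs = [tf.Variable(tf.random.normal(shape)) for _ in range(3)]

      with tf.GradientTape() as tape:
          y = tf.add_n(xs)
          loss = tf.reduce_sum(y)

      grads = tape.gradient(loss, xs)
      # d(reduce_sum(add_n(xs)))/dx_i is all ones for every input: the upstream
      # gradient flows through AddN unchanged to each operand.
      print([bool(tf.reduce_all(g == 1.0)) for g in grads])  # [True, True, True]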
  9. tensorflow/compiler/jit/tests/keras_imagenet_main.pbtxt

          type: DT_HALF
        }
      }
      attr {
        key: "Tmultiples"
        value {
          type: DT_INT32
        }
      }
    }
    node {
      name: "training/LossScaleOptimizer/gradients/AddN"
      op: "AddN"
      input: "training/LossScaleOptimizer/gradients/loss_1/fc1000/bias/Regularizer/Square_grad/Mul_1"
      input: "training/LossScaleOptimizer/gradients/fc1000_1/BiasAdd/Cast_grad/Cast"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 30 02:52:54 UTC 2019
    - 1.3M bytes
    - Viewed (0)
  10. tensorflow/compiler/jit/tests/keras_imagenet_main_graph_mode.pbtxt

        value {
          type: DT_INT64
        }
      }
      attr {
        key: "Truncate"
        value {
          b: false
        }
      }
    }
    node {
      name: "training/SGD/gradients/AddN"
      op: "AddN"
      input: "training/SGD/gradients/fc1000_1/BiasAdd_grad/BiasAddGrad"
      input: "training/SGD/gradients/loss_1/fc1000/bias/Regularizer/Square_grad/Mul_1"
      device: "/job:localhost/replica:0/task:0/device:GPU:0"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 30 02:52:54 UTC 2019
    - 1.1M bytes
    - Viewed (0)