Results 11 - 20 of 47 for Axis (0.04 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/tests/e2e/device-transform-nnapi.mlir

        %0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
        func.return %0 : tensor<2x1xf32>
        // CHECK: %[[VAL_0:.*]] = arith.constant dense<[2, 1]> : tensor<2xi32>
        // CHECK: %[[CONCAT:.*]] = "tfl.concatenation"(%arg0, %arg1) <{axis = 0 : i32, fused_activation_function = "NONE"}> : (tensor<1xf32>, tensor<1xf32>) -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.2K bytes
    - Viewed (0)
  2. tensorflow/cc/gradients/manip_grad.cc

                    const std::vector<Output>& grad_inputs,
                    std::vector<Output>* grad_outputs) {
      auto shift = op.input(1);
      auto axis = op.input(2);
      auto grad_op = Roll(scope, grad_inputs[0], Neg(scope, shift), axis);
      grad_outputs->push_back(grad_op);
      grad_outputs->push_back(NoGradient());
      grad_outputs->push_back(NoGradient());
      return scope.status();
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 19 12:19:42 UTC 2020
    - 1.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.cc

      }
    
      SmallVector<double, 4> scales;
      SmallVector<int64_t, 4> zeroPoints;
      scales.reserve(axisSize);
      zeroPoints.reserve(axisSize);
      for (size_t axis = 0; axis != axisSize; ++axis) {
        double rmin = rmins[axis];
        double rmax = rmaxs[axis];
        if (std::fabs(rmax - rmin) < std::numeric_limits<double>::epsilon()) {
          scales.push_back(1.0);
          zeroPoints.push_back(qmin);
          continue;
        }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 11:52:27 UTC 2024
    - 7.7K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/experimental/tac/tests/e2e/simple-graph.mlir

      %2 = "tfl.add"(%arg0, %arg3) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
      %3 = "tfl.pack"(%1, %2) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
      func.return %3 : tensor<2x1xf32>
    }
    
    // CHECK: %[[CST:.*]] = arith.constant dense<1> : tensor<4xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types_test.cc

        func.func @main(%arg0: tensor<3x3x!tf_type.qint8>, %arg1: tensor<3x3x!tf_type.qint8>) -> tensor<6x3x!tf_type.qint8> {
          %axis = "tf.Const"() { value = dense<0> : tensor<i64> } : () -> tensor<i64>
          %1 = "tf.ConcatV2"(%arg0, %arg1, %axis) : (tensor<3x3x!tf_type.qint8>, tensor<3x3x!tf_type.qint8>, tensor<i64>) -> tensor<6x3x!tf_type.qint8>
          func.return %1 : tensor<6x3x!tf_type.qint8>
        }
      })";
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 09:05:02 UTC 2024
    - 4.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-op-cost.mlir

    // -----
    
    func.func @pack_CPU(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<2x100xf32> attributes {tac.device = "CPU", tac.interface_name = "func_2"} {
      // CHECK: tac.cost = 1.000000e+02
      %0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, tac.device = "CPU", values_count = 2 : i32} : (tensor<100xf32>, tensor<100xf32>) -> tensor<2x100xf32>
      func.return %0 : tensor<2x100xf32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 24 05:29:10 UTC 2022
    - 5.7K bytes
    - Viewed (0)
  7. tensorflow/cc/framework/gradient_checker_test.cc

      xs.push_back(Placeholder(scope, DT_DOUBLE, Placeholder::Shape(shape)));
      xs.push_back(Placeholder(scope, DT_DOUBLE, Placeholder::Shape(shape)));
      auto tmp = Stack(scope, xs, Stack::Axis(0));
      auto y = Unstack(scope, tmp, 2, Unstack::Axis(0));
      double max_error;
      TF_ASSERT_OK((ComputeGradientError<double, double, double>(
          scope, xs, {shape, shape}, y.output, {shape, shape}, &max_error)));
      EXPECT_LT(max_error, 1e-10);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Aug 06 15:54:08 UTC 2018
    - 6.7K bytes
    - Viewed (0)
  8. tensorflow/c/experimental/ops/array_ops.cc

    //
    // Description:
    //   Given a tensor `input`, this operation inserts a dimension of 1 at the
    //   dimension index `axis` of `input`'s shape. The dimension index `axis`
    //   starts at zero; if you specify a negative number for `axis` it is counted
    //   backward from the end.
    //
    //   This operation is useful if you want to add a batch dimension to a single
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 10 19:11:36 UTC 2022
    - 6.7K bytes
    - Viewed (0)
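    Side note on result 8: the ExpandDims comment there describes the axis
    semantics (insert a size-1 dimension at `axis`, with negative values
    counted from the end). The following is a minimal sketch of those
    semantics using the TensorFlow C++ client API (the tensorflow::ops
    bindings, not the experimental C API file listed above); the scope
    setup, tensor values, and variable names are illustrative assumptions,
    not taken from that file.

      #include <vector>
      #include "tensorflow/cc/client/client_session.h"
      #include "tensorflow/cc/ops/standard_ops.h"
      #include "tensorflow/core/framework/tensor.h"

      int main() {
        using namespace tensorflow;
        using namespace tensorflow::ops;
        Scope scope = Scope::NewRootScope();
        // A single example of shape [3]. (Illustrative values.)
        auto input = Const(scope, {1.f, 2.f, 3.f});
        // axis = 0 inserts a size-1 dimension in front: shape becomes [1, 3],
        // i.e. a batch of one.
        auto batched = ExpandDims(scope, input, 0);
        // A negative axis counts backward from the end: axis = -1 appends a
        // size-1 dimension, giving shape [3, 1].
        auto trailing = ExpandDims(scope, input, -1);
        ClientSession session(scope);
        std::vector<Tensor> outputs;
        TF_CHECK_OK(session.Run({batched, trailing}, &outputs));
        return 0;
      }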
  9. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.td

      [(IsNotInLiftedFunc $res), (IsConstTensor $b)], [], (addBenefit 1)>;
    
    def LiftGather : Pat<
      (TF_GatherV2Op:$res $params, $indices, $axis, $batch_dims),
      (LiftAsTFPartitionedCall<"composite_gather_fn">
        (ArgumentList $params, $indices, $axis),
        (ResultList $res),
        (NamedAttributeList
          (NamedAttr<"batch_dims"> $batch_dims))),
      [(IsNotInLiftedFunc $res), (IsConstTensor $params)], [], (addBenefit 1)>;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 3.9K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tfrt/tests/convert_ref_variables.mlir

      // CHECK-SAME: (tensor<i32>, tensor<i32>, tensor<i32>) -> tensor<2xi32>
      %axis = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
      %0 = "tf.VariableV2"() {container = "", shape = #tf_type.shape<>, shared_name = "x"} : () -> tensor<!tf_type.int32ref>
      %1 = "tf.ConcatV2"(%0, %0, %axis) : (tensor<!tf_type.int32ref>, tensor<!tf_type.int32ref>, tensor<i32>) -> tensor<2xi32>
      func.return %1 : tensor<2xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 28 12:06:33 UTC 2022
    - 4.6K bytes
    - Viewed (0)