Results 1 - 10 of 20 for Axis (0.06 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir

      // CHECK-DAG: %[[ITEMS1_3:.*]] = "tf.ExpandDims"(%[[ITEMS1]]#3, %[[AXIS]])
      // CHECK-DAG: %[[ITEMS1_2:.*]] = "tf.ExpandDims"(%[[ITEMS1]]#2, %[[AXIS]])
      // CHECK-DAG: %[[ITEMS1_1:.*]] = "tf.ExpandDims"(%[[ITEMS1]]#1, %[[AXIS]])
      // CHECK-DAG: %[[ITEMS1_0:.*]] = "tf.ExpandDims"(%[[ITEMS1]]#0, %[[AXIS]])
      // CHECK-DAG: %[[ITEMS0_0:.*]] = "tf.ExpandDims"(%[[ITEMS0]], %[[AXIS]])
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 92K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

      %6 = "tfl.concatenation"(%1, %0) {axis = -1 : i32, fused_activation_function = "NONE"} : (tensor<1x1xf32>, tensor<1x1xf32>) -> tensor<1x2xf32>
      %7 = "quantfork.stats"(%6) {layerStats = dense<[-0.440728068, 0.26483655]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
    - Viewed (0)
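
    For reference, the shape arithmetic in the excerpt above (concatenating two 1x1 tensors along the last axis) can be reproduced with a minimal Python/TensorFlow sketch; the tensor values here are made up for illustration:

      import tensorflow as tf

      a = tf.constant([[1.0]])            # shape (1, 1)
      b = tf.constant([[2.0]])            # shape (1, 1)
      c = tf.concat([a, b], axis=-1)      # axis=-1 addresses the last axis
      print(c.shape)                      # (1, 2), matching tensor<1x2xf32> above
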
  3. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

    // to expand at the given `axis`.
    Type InferExpandDimsType(Type ty, int64_t axis, Builder *builder) {
      auto ranked_ty = mlir::dyn_cast<RankedTensorType>(ty);
    
      // Unranked type.
      if (!ranked_ty) return ty;
    
      auto shape = llvm::to_vector<4>(ranked_ty.getShape());
      if (axis < 0) axis += ranked_ty.getRank() + 1;
    
      shape.insert(shape.begin() + axis, 1);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
    - Viewed (0)
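
    The negative-axis handling in InferExpandDimsType above (axis += rank + 1 before inserting the unit dimension) mirrors the behavior of tf.expand_dims; a minimal sketch, with example shapes chosen here for illustration:

      import tensorflow as tf

      x = tf.zeros([2, 3])                        # rank 2
      # axis = -1 normalizes to -1 + 2 + 1 = 2, so the unit dim is appended last
      print(tf.expand_dims(x, axis=-1).shape)     # (2, 3, 1)
      print(tf.expand_dims(x, axis=0).shape)      # (1, 2, 3)
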
  4. tensorflow/compiler/mlir/tensorflow/tests/unroll-batch-matmul.mlir

      // CHECK: %[[MATMUL_PACKED:.*]] = "tf.Pack"(%[[MATMUL_1]], %[[MATMUL_2]], %[[MATMUL_3]], %[[MATMUL_4]], %[[MATMUL_5]], %[[MATMUL_6]]) <{axis = 0 : i64}> : (tensor<4x6xf32>, tensor<4x6xf32>, tensor<4x6xf32>, tensor<4x6xf32>, tensor<4x6xf32>, tensor<4x6xf32>) -> tensor<6x4x6xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Dec 06 18:42:28 UTC 2023
    - 63.7K bytes
    - Viewed (0)
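
    The CHECK line above packs six 4x6 matrices along a new leading axis; tf.stack (which lowers to tf.Pack) shows the same shape arithmetic in a minimal sketch:

      import tensorflow as tf

      parts = [tf.zeros([4, 6]) for _ in range(6)]
      packed = tf.stack(parts, axis=0)    # corresponds to "tf.Pack" with axis = 0
      print(packed.shape)                 # (6, 4, 6)
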
  5. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

    // Eliminate cumulative summations if the input's dimension along the axis is 1.
    def EliminateCumSumInclusive : Pat<
      (TFL_CumsumOp
         $input,
         (Arith_ConstantOp I32ElementsAttr:$axis),
         ConstBoolAttrFalse,
         $reverse),
      (replaceWithValue $input),
      [(AreInputDimensionsOneInAxes $input, $axis)]>;
    
    // Fusing raw computation of GELU op into one native tfl_gelu op.
    //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
    - Viewed (0)
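
    The pattern above removes an inclusive cumulative sum whose summed dimension has size 1, since it is then the identity; a minimal sketch of that equivalence (values made up for illustration):

      import tensorflow as tf

      x = tf.constant([[1.0], [2.0], [3.0]])      # shape (3, 1)
      y = tf.cumsum(x, axis=1, exclusive=False)   # inclusive cumsum over a size-1 axis
      # Every prefix sum along a size-1 axis is the element itself, so y == x
      print(bool(tf.reduce_all(tf.equal(x, y))))  # True
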
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

    // CHECK:     %17 = "tfl.gather"(%arg13, %14) <{axis = 0 : i32, batch_dims = 0 : i32}> {tac.device = "DARWINN", tac.inference_type = "FLOAT"} : (tensor<5xi32>, tensor<?xi32>) -> tensor<?xi32>
    // CHECK:     %18 = tfl.add %arg14, %17 {fused_activation_function = "NONE", tac.device = "DARWINN", tac.inference_type = "FLOAT"} : tensor<?xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
    - Viewed (0)
  7. tensorflow/cc/gradients/math_grad.cc

      bool reverse;
      TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "reverse", &reverse));
      attrs.reverse_ = !reverse;
    
      auto axis = op.input(1);
      auto sum = Cumsum(scope, grad_inputs[0], axis, attrs);
      grad_outputs->push_back(sum.out);
      grad_outputs->push_back(NoGradient());
      return scope.status();
    }
    REGISTER_GRADIENT_OP("Cumsum", CumsumGrad);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 50.7K bytes
    - Viewed (0)
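
    The gradient registration above computes the Cumsum gradient by running Cumsum on the incoming gradients with the reverse flag flipped; a minimal numeric check in Python (input values chosen for illustration):

      import tensorflow as tf

      x = tf.constant([1.0, 2.0, 3.0])
      with tf.GradientTape() as tape:
          tape.watch(x)
          y = tf.cumsum(x)                        # [1., 3., 6.]
      dx = tape.gradient(y, x)                    # upstream gradients default to ones
      # A reversed cumulative sum of those ones gives the same values: [3., 2., 1.]
      print(dx.numpy(), tf.cumsum(tf.ones_like(x), reverse=True).numpy())
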
  8. tensorflow/compiler/mlir/tfr/python/tfr_gen.py

        return type_
    
      def _pack_tensor_list(self, value):
        # This packs a list of tensors, so the axis is 0.
        axis = self._ssa_name('zero')
        self._emit_with_loc('\n{} = arith.constant 0 : i64'.format(axis))
        casted = self._ssa_name('pack')
        self.emit('\n{} = tfr.call @tf__pack({}, {})'.format(casted, value, axis))
        self._emit_with_loc(' : (!tfr.tensor_list, i64) -> !tfr.tensor')
        # load the op def of tf.Pack
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 27 15:27:03 UTC 2022
    - 55.8K bytes
    - Viewed (0)
  9. tensorflow/c/eager/c_api_test.cc

      TFE_TensorHandle* axis = TestAxisTensorHandle(ctx);
      TFE_Op* minOp = MinOp(ctx, input, axis);
      TFE_TensorHandle* retvals[1] = {nullptr};
      int num_retvals = 1;
      TFE_Execute(minOp, &retvals[0], &num_retvals, status);
      EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
      TFE_DeleteOp(minOp);
      TFE_DeleteTensorHandle(input);
      TFE_DeleteTensorHandle(axis);
      ASSERT_EQ(1, num_retvals);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Aug 03 20:50:20 UTC 2023
    - 94.6K bytes
    - Viewed (0)
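
    The test above builds a "Min" op by hand through the eager C API (the MinOp and TestAxisTensorHandle helpers are test fixtures not shown in the excerpt); a rough Python equivalent of the computation it executes, with placeholder input values:

      import tensorflow as tf

      x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
      axis = tf.constant([1])
      out = tf.raw_ops.Min(input=x, axis=axis, keep_dims=False)
      print(out.numpy())                          # [1. 3.]
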
  10. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

    // In the above calculation, they are replaced by new values. The new mean and
    // variance are calculated as follows:
    // new_mean = mean(x, axis=[0, 1, 2])
    // new_variance = mean(squared_difference(x, new_mean), axis=[0, 1, 2])
    //
    // The DRR rule for the case where is_training equals true is as follows:
    // def : Pattern<
    //     (TF_FusedBatchNormV3Op:$root
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
    - Viewed (0)
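
    The comment above defines the recomputed batch statistics for the training case: a per-channel mean over the N, H and W axes of an NHWC input, and a variance taken as the mean squared difference from that mean; a minimal sketch (shapes chosen here for illustration):

      import tensorflow as tf

      x = tf.random.normal([8, 4, 4, 16])                  # NHWC input
      new_mean = tf.reduce_mean(x, axis=[0, 1, 2])         # per-channel mean, shape (16,)
      new_variance = tf.reduce_mean(
          tf.math.squared_difference(x, new_mean), axis=[0, 1, 2])
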