Results 1 - 10 of 24 for Cumsum (0.75 sec)

  1. src/cmd/vendor/github.com/google/pprof/internal/report/source.go

    			fns := fileNodes[filename]
    			flatSum, cumSum := fns.Sum()
    
    			fnodes, _, err := getSourceFromFile(filename, reader, fns, 0, 0)
    			fmt.Fprintf(w, "ROUTINE ======================== %s in %s\n", name, filename)
    			fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n",
    				rpt.formatValue(flatSum), rpt.formatValue(cumSum),
    				measurement.Percentage(cumSum, rpt.total))
    
    			if err != nil {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 31 19:48:28 UTC 2024
    - 31.3K bytes
    - Viewed (0)
  2. tensorflow/cc/gradients/math_grad.cc

                      std::vector<Output>* grad_outputs) {
      if (op.num_inputs() != 2) {
        return errors::InvalidArgument("Cumsum requires 2 arguments");
      }
      if (grad_inputs.size() != 1) {
        return errors::InvalidArgument("Cumsum grad requires 1 grad input");
      }
    
      Cumsum::Attrs attrs;
      TF_RETURN_IF_ERROR(
          GetNodeAttr(op.node()->attrs(), "exclusive", &attrs.exclusive_));
      bool reverse;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 50.7K bytes
    - Viewed (0)
  3. tensorflow/cc/gradients/math_grad_test.cc

      int axis = std::get<2>(GetParam());
    
      TensorShape shape({2, 3, 2});
      auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
      Cumsum::Attrs attrs;
      attrs.exclusive_ = std::get<0>(GetParam());
      attrs.reverse_ = std::get<1>(GetParam());
      auto y = Cumsum(scope_, x, axis, attrs);
      RunTest({x}, {shape}, {y}, {shape});
    }
    
    INSTANTIATE_TEST_SUITE_P(CumsumGrad, CumsumGradTest,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 36K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      %axis_p3 = arith.constant dense<3> : tensor<i32>
      %res_m4 = "tfl.cumsum"(%arg, %axis_m4) {exclusive = false, reverse = false} : (tensor<1x2x1x3xf32>, tensor<i32>) -> tensor<1x2x1x3xf32>  // Eliminated
      %res_m3 = "tfl.cumsum"(%arg, %axis_m3) {exclusive = false, reverse = false} : (tensor<1x2x1x3xf32>, tensor<i32>) -> tensor<1x2x1x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py

        """
        total_freq = sum(self._hist_freq)
        # hist_freq_cumsum is dividing cumulative sum of hist_freq by total_freq
        # hist_freq_cumsum's value is in range [0, 1] by its definition
        hist_freq_cumsum = np.cumsum(self._hist_freq) / total_freq
    
        # min_percentile and max_percentile are converted from [0, 100] to [0, 1].
        min_quantile, max_quantile = (
            self._calib_opts.calibration_parameters.min_percentile / 100.0,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 11 19:29:56 UTC 2024
    - 14.7K bytes
    - Viewed (0)
  6. src/cmd/vendor/github.com/google/pprof/internal/report/report.go

    			fmt.Fprintf(w, "    AKA ======================== %s\n", name)
    		}
    		fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n",
    			rpt.formatValue(flatSum), rpt.formatValue(cumSum),
    			measurement.Percentage(cumSum, rpt.total))
    
    		function, file, line := "", "", 0
    		for _, n := range ns {
    			locStr := ""
    			// Skip loc information if it hasn't changed from previous instruction.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 31 19:48:28 UTC 2024
    - 37.5K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

      // CHECK-LABEL: tranpose_arg64
      // CHECK: "tfl.transpose"
    }
    
    func.func @cumsum(%arg0: tensor<3x3xf32>, %arg1: tensor<i32>) -> tensor<3x3xf32> {
      %0 = "tf.Cumsum"(%arg0, %arg1) {exclusive = false, reverse = false} : (tensor<3x3xf32>, tensor<i32>) -> tensor<3x3xf32>
      func.return %0 : tensor<3x3xf32>
      // CHECK-LABEL: cumsum
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs

      NON_MAX_SUPPRESSION_V4 = 120,
      NON_MAX_SUPPRESSION_V5 = 121,
      SCATTER_ND = 122,
      SELECT_V2 = 123,
      DENSIFY = 124,
      SEGMENT_SUM = 125,
      BATCH_MATMUL = 126,
      PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
      CUMSUM = 128,
      CALL_ONCE = 129,
      BROADCAST_TO = 130,
      RFFT2D = 131,
      CONV_3D = 132,
      IMAG=133,
      REAL=134,
      COMPLEX_ABS=135,
      HASHTABLE = 136,
      HASHTABLE_FIND = 137,
      HASHTABLE_IMPORT = 138,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 14:28:27 UTC 2024
    - 30K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

      // CHECK: mhlo.constant dense<> : tensor<0xf32>
      %1 = "tf.Cumsum"(%arg0, %0) : (tensor<0xf32>, tensor<i32>) -> tensor<0xf32>
      func.return %1 : tensor<0xf32>
    }
    
    // -----
    
    // CHECK-LABEL: func @cumsum_dynamic
    func.func @cumsum_dynamic(%arg0: tensor<?xf32>, %arg1: tensor<i32>) -> tensor<?xf32> {
      // CHECK: "tf.Cumsum"
      %0 = "tf.Cumsum"(%arg0, %arg1) : (tensor<?xf32>, tensor<i32>) -> tensor<?xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/fuse-tftext.mlir

      %213 = "tf.Mul"(%212, %12) {device = ""} : (tensor<i64>, tensor<1xi64>) -> tensor<1xi64>
      %214 = "tf.Tile"(%213, %211) {device = ""} : (tensor<1xi64>, tensor<1xi64>) -> tensor<?xi64>
      %215 = "tf.Cumsum"(%214, %14) {device = "", exclusive = false, reverse = false} : (tensor<?xi64>, tensor<i32>) -> tensor<?xi64>
      %216 = "tf.ConcatV2"(%11, %215, %3) {device = ""} : (tensor<1xi64>, tensor<?xi64>, tensor<i32>) -> tensor<?xi64>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 460.3K bytes
    - Viewed (0)
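
Results 2 and 3 above are TensorFlow's C++ gradient registration and parameterized test for Cumsum. The mathematical identity behind that gradient (the construction itself falls outside the quoted excerpt) is that the derivative of a cumulative sum is another cumulative sum of the upstream gradient along the same axis, with the reverse flag flipped and the exclusive flag preserved. The NumPy sketch below checks this identity against an explicit Jacobian; it is illustrative only, not the TensorFlow implementation, and the helper name tf_style_cumsum is invented for this example.

    import numpy as np

    def tf_style_cumsum(x, axis=0, exclusive=False, reverse=False):
        """Cumulative sum with TensorFlow-style exclusive/reverse flags.
        Hypothetical helper written only to illustrate the gradient identity."""
        if reverse:
            x = np.flip(x, axis)
        y = np.cumsum(x, axis)
        if exclusive:
            # Shift by one along `axis` so each output excludes its own element.
            y = np.roll(y, 1, axis)
            first = [slice(None)] * y.ndim
            first[axis] = 0
            y[tuple(first)] = 0
        return np.flip(y, axis) if reverse else y

    # Check: for y = cumsum(x, axis, exclusive, reverse), the gradient
    # dL/dx equals cumsum(dL/dy, axis, exclusive, not reverse).
    rng = np.random.default_rng(0)
    axis, n = 1, 3
    g = rng.standard_normal((2, n, 2))          # upstream gradient dL/dy
    gm = np.moveaxis(g, axis, -1)               # put the scanned axis last
    for exclusive in (False, True):
        for reverse in (False, True):
            # Explicit Jacobian dy_j/dx_i of cumsum along `axis` (triangular).
            J = np.tril(np.ones((n, n)), -1 if exclusive else 0)
            if reverse:
                J = J.T
            explicit = gm @ J                   # (dL/dy) contracted with dy/dx
            closed = np.moveaxis(
                tf_style_cumsum(g, axis, exclusive, not reverse), axis, -1)
            assert np.allclose(explicit, closed)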
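Result 5 above divides the cumulative sum of histogram frequencies by the total count, yielding an empirical CDF whose values lie in [0, 1], and converts percentiles from [0, 100] to quantiles on [0, 1]. The sketch below shows that idea in isolation; the bin edges, the searchsorted lookup, and every name other than hist_freq_cumsum are assumptions for illustration, not the quantization calibrator's actual code.

    import numpy as np

    # Hypothetical histogram of observed activation values (counts per bin).
    hist_freq = np.array([1, 4, 10, 25, 30, 20, 8, 2], dtype=np.float64)
    bin_edges = np.linspace(-4.0, 4.0, num=hist_freq.size + 1)

    # Normalized cumulative sum: an empirical CDF over the histogram bins,
    # in [0, 1] by construction because it is divided by the total count.
    total_freq = hist_freq.sum()
    hist_freq_cumsum = np.cumsum(hist_freq) / total_freq

    # Percentiles given on [0, 100] are converted to quantiles on [0, 1].
    min_percentile, max_percentile = 0.1, 99.9
    min_quantile = min_percentile / 100.0
    max_quantile = max_percentile / 100.0

    # First bins whose cumulative mass reaches each quantile give a
    # calibration range (an illustrative choice of edges, not the real rule).
    lo_bin = int(np.searchsorted(hist_freq_cumsum, min_quantile))
    hi_bin = int(np.searchsorted(hist_freq_cumsum, max_quantile))
    calib_min, calib_max = bin_edges[lo_bin], bin_edges[hi_bin + 1]
    print(calib_min, calib_max)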