Results 1 - 10 of 50 for input_tensor (0.16 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/integration_test/custom_aggregator_op_test.py

        ops.disable_eager_execution()
    
      def testBypassAndMinMax(self):
        with self.session():
          input_tensor = array_ops.constant(
              [1.0, 2.0, 3.0, 4.0, 5.0], dtypes.float32
          )
    
          aggregator = custom_aggregator_op_wrapper.custom_aggregator(
              input_tensor,
              id='1',
              calibration_method=_CalibrationMethod.CALIBRATION_METHOD_MIN_MAX,
          )
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 5.6K bytes
    - Viewed (0)
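
The excerpt above routes a constant through the custom aggregator op with MIN_MAX calibration. As a rough illustration of the statistic that method records, using only stock TensorFlow ops (a sketch, not the aggregator's API):

    import tensorflow as tf

    # Sketch only: MIN_MAX calibration reduces the input to its smallest and
    # largest values; tf.reduce_min/tf.reduce_max compute the same statistics
    # outside the custom op.
    input_tensor = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0], tf.float32)
    calib_min = tf.reduce_min(input_tensor)  # 1.0
    calib_max = tf.reduce_max(input_tensor)  # 5.0
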
  2. tensorflow/compiler/mlir/quantization/tensorflow/python/representative_dataset_test.py

            inputs={'input_tensor': meta_graph_pb2.TensorInfo(name='input:0')}
        )
    
        with self.session():
          input_tensor = constant_op.constant([1, 2, 3, 4, 5, 6])
          sample = {'input_tensor': input_tensor}
    
          feed_dict = repr_dataset.create_feed_dict_from_input_data(
              sample, signature_def
          )
          input_tensor_data = input_tensor.eval()
    
        self.assertLen(feed_dict, 1)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jan 04 07:35:19 UTC 2024
    - 11.6K bytes
    - Viewed (0)
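
The test above builds a feed_dict from a sample keyed by the signature input name; conceptually, each key is resolved to the graph tensor name recorded in the SignatureDef's TensorInfo (e.g. 'input:0'). A simplified sketch of that mapping, using a hypothetical helper rather than the module's implementation:

    import numpy as np

    def feed_dict_from_sample(sample, signature_inputs):
      # Hypothetical helper: map each sample key to the tensor name stored in
      # the corresponding TensorInfo proto, mirroring what
      # create_feed_dict_from_input_data does in the excerpt above.
      return {signature_inputs[key].name: np.asarray(value)
              for key, value in sample.items()}
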
  3. tensorflow/compiler/jit/clone_constants_for_better_clustering_test.cc

      TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result));
    
      OutputTensor add1_operand;
      TF_ASSERT_OK(
          FindNodeByName(result.get(), "add1")->input_tensor(1, &add1_operand));
    
      OutputTensor add2_operand;
      TF_ASSERT_OK(
          FindNodeByName(result.get(), "add2")->input_tensor(1, &add2_operand));
    
      EXPECT_NE(add1_operand.node, add2_operand.node);
    }
    
    TEST(CloneConstantsForBetterClusteringTest, HostConstantPlacedOnCpu) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 8.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc

      return ElementsAttr(DenseStringElementsAttr::get(type, string_refs));
    }
    
    absl::StatusOr<ElementsAttr> ConvertTensor(const Tensor& input_tensor,
                                               Builder* builder) {
      const auto& input_dtype = input_tensor.dtype();
      const auto& input_shape = input_tensor.shape();
      Type elt_type;
      TF_RETURN_IF_ERROR(ConvertDataType(input_dtype, *builder, &elt_type));
      SmallVector<int64_t, 4> shape;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Apr 26 09:37:10 UTC 2024
    - 20.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

            pass
    
          @def_function.function
          def add(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
            """Performs an add operation.
    
            Args:
              input_tensor: Input tensor to perform add on.
    
            Returns:
              A map of: output key -> output result.
            """
            out = math_ops.add(input_tensor, input_tensor)
            return {'output': out}
    
        model = AddModel()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
    - Viewed (0)
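
The same pattern can be written with public TensorFlow APIs; a minimal sketch of a module whose traced function doubles its input (illustrative only, not the test base's class):

    import tensorflow as tf

    class AddModel(tf.Module):
      """Doubles its input and returns an output-name -> tensor mapping."""

      @tf.function(input_signature=[tf.TensorSpec([5], tf.float32)])
      def add(self, input_tensor):
        return {'output': tf.math.add(input_tensor, input_tensor)}

    model = AddModel()
    result = model.add(tf.constant([1.0, 2.0, 3.0, 4.0, 5.0]))
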
  6. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/custom_aggregator_op.cc

            max_percentile);
      }
    
      void Compute(OpKernelContext* context) override {
        const Tensor& input_tensor = context->input(0);
    
        // Use the same input for the first output.
        context->set_output(0, input_tensor);
    
        // Calculate min/max statistics.
        const auto input_flat = input_tensor.flat<float>();
        Tensor *min_output = nullptr, *max_output = nullptr;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 6.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

            ]
        )
        def __call__(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
          """Performs a matrix multiplication.
    
          Args:
            input_tensor: Input tensor to matmul with the filter.
    
          Returns:
            A map of: output key -> output result.
          """
    
          out = math_ops.matmul(input_tensor, self.filters)
          return {'output': out}
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

          @def_function.function
          def matmul(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
            """Performs a matrix multiplication.
    
            Args:
              input_tensor: Input tensor to matmul with the filter.
    
            Returns:
              A 'output' -> output tensor mapping
            """
            out = math_ops.matmul(input_tensor, random_tensor_gen_fn((2, 3)))
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
    - Viewed (0)
  9. tensorflow/c/kernels/bitcast_op_test.cc

        return cpu_allocator();
      }
    };
    
    void TestBitcastOp(Tensor* input_tensor, DataType out_type,
                       TensorShape expected_shape, error::Code expected_code) {
      Status status;
      NodeDef def;
      def.set_op("Bitcast");
      def.set_device(DEVICE_CPU);
    
      AttrValue typeAttr;
      SetAttrValue(input_tensor->dtype(), &typeAttr);
    
      AttrValue outTypeAttr;
      SetAttrValue(out_type, &outTypeAttr);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jul 18 15:10:51 UTC 2022
    - 5.5K bytes
    - Viewed (0)
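
The C++ test above drives the Bitcast kernel directly; the op's behavior is easier to see from Python (shown only as an illustration of what the kernel does):

    import tensorflow as tf

    # Bitcast reinterprets the underlying bytes without copying: each int32
    # element becomes four uint8 elements, so the shape gains a trailing 4.
    x = tf.constant([1, 2, 3], dtype=tf.int32)
    y = tf.bitcast(x, tf.uint8)  # shape (3, 4)
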
  10. tensorflow/compiler/mlir/lite/transforms/post_quantize.cc

          // Write the value from `input_tensor` if it is the last axis or
          // recurse into the next axis.
          const bool is_last_axis = output_axis == num_dimensions - 1;
          if (is_last_axis) {
            new_values->push_back(
                input_tensor.getValues<Attribute>()[*input_indices]);
          } else {
            ComputePermutation(input_tensor, perm, output_shape, num_dimensions,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.1K bytes
    - Viewed (0)
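
The recursion in this excerpt folds a transpose of a constant by walking output axes and copying values from input_tensor according to perm; numpy expresses the intended result directly (a sketch of the outcome, not the pass's code):

    import numpy as np

    input_values = np.arange(6).reshape(2, 3)
    perm = (1, 0)
    # Constant-folded transpose: element [i, j] of the result equals
    # input_values[j, i].
    folded = np.transpose(input_values, perm)  # shape (3, 2)
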