Results 1 - 10 of 14 for input_tensor (0.35 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/integration_test/custom_aggregator_op_test.py

        ops.disable_eager_execution()
    
      def testBypassAndMinMax(self):
        with self.session():
          input_tensor = array_ops.constant(
              [1.0, 2.0, 3.0, 4.0, 5.0], dtypes.float32
          )
    
          aggregator = custom_aggregator_op_wrapper.custom_aggregator(
              input_tensor,
              id='1',
              calibration_method=_CalibrationMethod.CALIBRATION_METHOD_MIN_MAX,
          )
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 5.6K bytes
  2. tensorflow/compiler/jit/clone_constants_for_better_clustering_test.cc

      TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result));
    
      OutputTensor add1_operand;
      TF_ASSERT_OK(
          FindNodeByName(result.get(), "add1")->input_tensor(1, &add1_operand));
    
      OutputTensor add2_operand;
      TF_ASSERT_OK(
          FindNodeByName(result.get(), "add2")->input_tensor(1, &add2_operand));
    
      EXPECT_NE(add1_operand.node, add2_operand.node);
    }
    
    TEST(CloneConstantsForBetterClusteringTest, HostConstantPlacedOnCpu) {
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 8.4K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/custom_aggregator_op.cc

            max_percentile);
      }
    
      void Compute(OpKernelContext* context) override {
        const Tensor& input_tensor = context->input(0);
    
        // Use the same input for the first output.
        context->set_output(0, input_tensor);
    
        // Calculate min/max statistics.
        const auto input_flat = input_tensor.flat<float>();
        Tensor *min_output = nullptr, *max_output = nullptr;
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 6.2K bytes
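The Compute method above forwards its input unchanged to the first output and then scans the flattened values to collect min/max calibration statistics. A minimal standalone sketch of that min/max pass over a flat float buffer, in plain C++ with no TensorFlow dependency (all names here are illustrative, not the op's real interface):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Illustrative stand-in for the statistics the aggregator records.
    struct MinMaxStats {
      float min;
      float max;
    };

    // Scan the flattened buffer once, the way the kernel scans input_flat.
    // Assumes a non-empty buffer.
    MinMaxStats ComputeMinMax(const std::vector<float>& flat) {
      auto [min_it, max_it] = std::minmax_element(flat.begin(), flat.end());
      return MinMaxStats{*min_it, *max_it};
    }

    int main() {
      // Same values as the Python test in result 1 above.
      std::vector<float> input = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f};
      MinMaxStats stats = ComputeMinMax(input);
      std::printf("min=%g max=%g\n", stats.min, stats.max);  // prints min=1 max=5
      return 0;
    }

The kernel itself writes these statistics into the min_output/max_output tensors shown above rather than printing them.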
  4. tensorflow/c/kernels/bitcast_op_test.cc

        return cpu_allocator();
      }
    };
    
    void TestBitcastOp(Tensor* input_tensor, DataType out_type,
                       TensorShape expected_shape, error::Code expected_code) {
      Status status;
      NodeDef def;
      def.set_op("Bitcast");
      def.set_device(DEVICE_CPU);
    
      AttrValue typeAttr;
      SetAttrValue(input_tensor->dtype(), &typeAttr);
    
      AttrValue outTypeAttr;
      SetAttrValue(out_type, &outTypeAttr);
    
    - Last Modified: Mon Jul 18 15:10:51 UTC 2022
    - 5.5K bytes
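TestBitcastOp above builds a Bitcast NodeDef by hand, runs it on input_tensor, and checks the status and output shape it gets back. As background, Bitcast reshapes only along the last dimension when the source and destination element sizes differ; a standalone sketch of that shape rule in plain C++ (the helper name and types are illustrative):

    #include <cstdio>
    #include <optional>
    #include <vector>

    // Expected Bitcast output shape given element sizes: smaller output
    // elements append a trailing dimension, larger output elements consume
    // the last dimension, equal sizes leave the shape unchanged.
    std::optional<std::vector<int>> BitcastShape(std::vector<int> shape,
                                                 int in_bytes, int out_bytes) {
      if (in_bytes == out_bytes) return shape;
      if (in_bytes > out_bytes) {  // e.g. float32 -> uint8
        shape.push_back(in_bytes / out_bytes);
        return shape;
      }
      // e.g. uint8 -> float32: the last dim must equal the size ratio.
      if (shape.empty() || shape.back() != out_bytes / in_bytes) return std::nullopt;
      shape.pop_back();
      return shape;
    }

    int main() {
      auto s = BitcastShape({2, 3}, /*in_bytes=*/4, /*out_bytes=*/1);
      if (s) std::printf("rank=%zu last_dim=%d\n", s->size(), s->back());  // rank=3 last_dim=4
      return 0;
    }

For example, bitcasting a [2, 3] float32 tensor to uint8 yields shape [2, 3, 4].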
  5. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/importer_test_min_max.cc

        // Inject min/max into the input and output tensors.
            auto& input_tensor = sub_graph->tensors[op->inputs[0]];
            input_tensor->quantization->scale.clear();
            input_tensor->quantization->zero_point.clear();
            input_tensor->quantization->min.push_back(-1.0);
            input_tensor->quantization->max.push_back(1.0);
    
            auto& output_tensor = sub_graph->tensors[op->outputs[0]];
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 6.8K bytes
  6. tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h

    absl::StatusOr<mlir::ElementsAttr> ConvertTensorProto(
        const TensorProto& input_tensor, mlir::Builder* builder);
    
    // Converts a TensorFlow tensor into an MLIR elements attribute.
    absl::StatusOr<mlir::ElementsAttr> ConvertTensor(const Tensor& input_tensor,
                                                     mlir::Builder* builder);
    
    - Last Modified: Fri Apr 26 09:37:10 UTC 2024
    - 2.9K bytes
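ConvertTensor takes a tensorflow::Tensor plus an mlir::Builder and returns the converted attribute wrapped in absl::StatusOr. A hedged sketch of a call site (only the signature comes from the header above; the setup, the DumpAsAttr name, and the assumption that the function lives in the tensorflow namespace are illustrative):

    #include "absl/status/status.h"
    #include "absl/status/statusor.h"
    #include "mlir/IR/Builders.h"
    #include "mlir/IR/MLIRContext.h"
    #include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
    #include "tensorflow/core/framework/tensor.h"

    // Hypothetical helper: convert a tensor to an MLIR attribute and dump it.
    absl::Status DumpAsAttr(const tensorflow::Tensor& input_tensor) {
      mlir::MLIRContext context;
      mlir::Builder builder(&context);

      // ConvertTensor returns absl::StatusOr<mlir::ElementsAttr>, so the
      // error path can be propagated directly.
      absl::StatusOr<mlir::ElementsAttr> attr =
          tensorflow::ConvertTensor(input_tensor, &builder);
      if (!attr.ok()) return attr.status();

      attr->dump();  // Print the attribute for inspection.
      return absl::OkStatus();
    }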
  7. tensorflow/compiler/jit/pjrt_device_context.h

                                 absl::string_view tensor_name, Device* device,
                                 Tensor* cpu_tensor, StatusCallback done) override;
      void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device,
                                  Tensor* output_tensor,
                                  StatusCallback done) const override;
    
      bool use_pjrt_tensor_buffer() const { return use_pjrt_tensor_buffer_; }
    
    - Last Modified: Wed Jul 19 19:27:39 UTC 2023
    - 2.7K bytes
  8. tensorflow/compiler/jit/xla_host_send_device_context.h

                                 Tensor* cpu_tensor, StatusCallback done) override {
        done(errors::Internal("host->device copy not implemented."));
      }
    
      void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device,
                                  Tensor* output_tensor,
                                  StatusCallback done) const override {
        done(errors::Internal("device->device copy not implemented."));
      }
    
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 3.7K bytes
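This header, like xla_host_recv_device_context.h in result 10 below, stubs out unsupported copy paths by completing the StatusCallback with an Internal error instead of returning a status. A stripped-down sketch of that callback-completion pattern, with placeholder types standing in for the TensorFlow ones:

    #include <cstdio>
    #include <functional>
    #include <string>

    // Placeholder stand-ins for tensorflow::Status and StatusCallback; the
    // real types live in the TensorFlow runtime.
    struct Status {
      bool ok = true;
      std::string message;
    };
    using StatusCallback = std::function<void(const Status&)>;

    struct Tensor {};  // placeholder
    struct Device {};  // placeholder

    // Mirrors the override pattern above: an unsupported copy path completes
    // immediately by handing an Internal-style error to the callback.
    void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device,
                                Tensor* output_tensor, StatusCallback done) {
      done(Status{false, "device->device copy not implemented."});
    }

    int main() {
      Tensor in, out;
      Device dev;
      CopyTensorInSameDevice(&in, &dev, &out, [](const Status& s) {
        std::printf("%s\n", s.message.c_str());
      });
      return 0;
    }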
  9. tensorflow/compiler/mlir/tfrt/runtime_fallback/runtime_fallback_executor.cc

      exec_arguments.reserve(compute->num_arguments());
      exec_arguments.push_back(tfrt::GetReadyChain().release());
      for (const Tensor& input_tensor : arguments) {
        auto av = MakeAvailableAsyncValueRef<FallbackTensor>(input_tensor);
        exec_arguments.push_back(av.release());
      }
    
      // Space for returned values.
      llvm::SmallVector<RCReference<AsyncValue>> results(compute->num_results());
    
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 9.1K bytes
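The loop above wraps each input Tensor in an async value that is already available before appending it to the executor's argument list. A loose standalone analogy using std::promise/std::shared_future in place of tfrt's AsyncValueRef (all names are illustrative):

    #include <cstdio>
    #include <future>
    #include <vector>

    struct Tensor { float value = 0; };  // placeholder for tensorflow::Tensor

    // Mirrors the loop above: every input tensor is wrapped in a handle that
    // is already "available" (here, a std::shared_future that is fulfilled up
    // front), then appended to the executor's argument list.
    std::vector<std::shared_future<Tensor>> MakeArguments(
        const std::vector<Tensor>& arguments) {
      std::vector<std::shared_future<Tensor>> exec_arguments;
      exec_arguments.reserve(arguments.size());
      for (const Tensor& input_tensor : arguments) {
        std::promise<Tensor> p;
        p.set_value(input_tensor);                 // value is ready immediately
        exec_arguments.push_back(p.get_future());  // converts to shared_future
      }
      return exec_arguments;
    }

    int main() {
      std::vector<Tensor> inputs = {{1.f}, {2.f}};
      auto args = MakeArguments(inputs);
      std::printf("%g\n", args[1].get().value);  // prints 2
      return 0;
    }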
  10. tensorflow/compiler/jit/xla_host_recv_device_context.h

                                 StringPiece tensor_name, Device* device,
                                 Tensor* cpu_tensor, StatusCallback done) override;
    
      void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device,
                                  Tensor* output_tensor,
                                  StatusCallback done) const override {
        done(errors::Internal("device->device copy not implemented."));
      }
    
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 3.9K bytes