Results 101 - 110 of 652 for output0 (0.13 sec)

  1. tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel_4bit.pbtxt

    # CHECK:       buffer: 6,
    # CHECK:       name: "output",
    # CHECK:       quantization: {
    # CHECK:         scale: [ 0.093635 ],
    # CHECK:         zero_point: [ 22 ]
    # CHECK:       }
    # CHECK:     } ],
    # CHECK:     inputs: [ 0 ],
    # CHECK:     outputs: [ 5 ],
    # CHECK:     operators: [ {
    # CHECK:       inputs: [ 0, 3, 2 ],
    # CHECK:       outputs: [ 4 ],
    # CHECK:       builtin_options_type: Conv2DOptions,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/tests/tpu_sharding_identification.mlir

      %1 = "tf.Identity"(%0) : (tensor<*xi32>) -> (tensor<*xi32>)
      func.return %1 : tensor<*xi32>
    }
    
    // -----
    
    // Tests that partitioned data inputs/outputs are set correctly when XLA SPMD
    // is enabled. Non-replicated inputs/outputs should have their sharding set to
    // replicate sharding ("").
    
    // CHECK-LABEL: func @partitioned_input_output
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Feb 20 19:07:52 UTC 2024
    - 47.5K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_merge_variables_with_execute.cc

      if (var_access_info.per_resource_info.empty()) return success();
    
      // Start creating the new TPUExecuteAndUpdateVariables op.
      builder->setInsertionPoint(execute_launch);
      // Output types. Skip the original outputs for merged assigns.
      llvm::SmallVector<Type, 8> new_output_types;
      int old_output_index = 0;
      for (const auto& type : execute_launch.getResultTypes()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 29 17:52:11 UTC 2024
    - 27K bytes
    - Viewed (0)
  4. testing/integ-test/src/integTest/groovy/org/gradle/integtests/TaskExecutionIntegrationTest.groovy

            }
        }
    
        def "produces a sensible error when a task declares both outputs and destroys"() {
            buildFile << """
                task a {
                    outputs.file('foo')
                    destroyables.register('bar')
                }
            """
            file('foo') << 'foo'
            file('bar') << 'bar'
    
            expect:
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Thu Apr 04 07:21:38 UTC 2024
    - 25.7K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/transforms/tpu_validate_inputs.cc

      int arity = rep.getOutputs().size();
      if (arity != num_replicas) {
        rep.emitOpError(
            "TF2XLA TPU bridge input check: number of outputs inconsistent.")
            << " num_replicas=" << num_replicas << " no. of outputs=" << arity;
        return false;
      }
      for (auto& pred : GetPredecessors(rep)) {
        if (!IsTpuRegularOp(pred)) continue;
        auto errormsg = [&]() -> std::string {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 06:51:01 UTC 2024
    - 21.5K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_custom_aggregation_ops.mlir

      func.func @serving_default(%arg0: tensor<1x4xf32> {tf_saved_model.index_path = ["x"]}) -> (tensor<1x3xf32> {tf_saved_model.index_path = ["output"]}) attributes {tf.entry_function = {control_outputs = "", inputs = "serving_default_x:0", outputs = "PartitionedCall:0"}, tf_saved_model.exported_names = ["serving_default"]} {
        %cst = "tf.Const"() <{value = dense<[0, 1]> : tensor<2xi32>}> {device = ""} : () -> tensor<2xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 32.1K bytes
    - Viewed (0)
  7. tensorflow/c/c_api_experimental.cc

      VLOG(1) << "Running the dequeue op";
      TF_Output output{dequeue_op, 0};
      TF_Tensor* ret;
      TF_SessionRun(session, /*run_options*/ nullptr,
                    // input related parameters
                    /*inputs*/ nullptr, /*input_values*/ nullptr, /*ninputs*/ 0,
                    // output related parameters
                    /*outputs*/ &output, /*output_values*/ &ret,
                    /*noutputs*/ 1,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 03:35:10 UTC 2024
    - 29.4K bytes
    - Viewed (0)
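The c_api_experimental.cc excerpt in result 7 annotates each TF_SessionRun parameter inline. For reference, here is a minimal, hedged C++ sketch of the same single-fetch call pattern against the public C API; the helper name RunSingleFetch is hypothetical, and it assumes a `session` and a `dequeue_op` already exist, as they do in the surrounding file.

    #include "tensorflow/c/c_api.h"

    // Runs `session` once and fetches output 0 of `dequeue_op`.
    // The caller keeps ownership of `session` and `dequeue_op`; the returned
    // tensor must be released with TF_DeleteTensor.
    TF_Tensor* RunSingleFetch(TF_Session* session, TF_Operation* dequeue_op) {
      TF_Output output{dequeue_op, 0};  // fetch index 0 of the op
      TF_Tensor* ret = nullptr;
      TF_Status* status = TF_NewStatus();
      TF_SessionRun(session, /*run_options=*/nullptr,
                    /*inputs=*/nullptr, /*input_values=*/nullptr, /*ninputs=*/0,
                    /*outputs=*/&output, /*output_values=*/&ret, /*noutputs=*/1,
                    /*target_opers=*/nullptr, /*ntargets=*/0,
                    /*run_metadata=*/nullptr, status);
      if (TF_GetCode(status) != TF_OK) {
        // In real code, surface TF_Message(status) instead of dropping it.
        ret = nullptr;
      }
      TF_DeleteStatus(status);
      return ret;
    }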
  8. platforms/core-execution/execution/src/main/java/org/gradle/internal/execution/steps/AssignImmutableWorkspaceStep.java

     * Assigns an immutable workspace to the work, and makes sure it contains the correct outputs.
     *
     * <ul>
     * <li>If an immutable workspace already exists, it is checked for consistency, and is returned
     * if found correct.</li>
     * <li>If the workspace is inconsistent (the output hashes stored in {@code metadata.bin} do not match
     * the hashes taken by snapshotting the current outputs), the workspace is moved to a temporary
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Tue Mar 19 16:44:11 UTC 2024
    - 19.7K bytes
    - Viewed (0)
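The AssignImmutableWorkspaceStep Javadoc in result 8 describes a consistency check: output hashes recorded in metadata.bin are compared against hashes obtained by snapshotting the current outputs, and the workspace is reused only when they match. A small illustrative sketch of that comparison (written in C++ here with hypothetical names; this is not Gradle's actual API) could look like:

    #include <string>
    #include <unordered_map>

    using HashesByPath = std::unordered_map<std::string, std::string>;

    // Returns true when the hashes recorded at workspace-creation time
    // (e.g. read from metadata.bin) match the hashes of the outputs as they
    // exist on disk now. A mismatch marks the workspace as inconsistent.
    bool WorkspaceIsConsistent(const HashesByPath& stored,
                               const HashesByPath& snapshotted) {
      if (stored.size() != snapshotted.size()) return false;
      for (const auto& [path, hash] : stored) {
        auto it = snapshotted.find(path);
        if (it == snapshotted.end() || it->second != hash) return false;
      }
      return true;
    }

Per the (truncated) Javadoc above, a failed check leads to the inconsistent workspace being moved aside rather than reused.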
  9. tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc

      // Keras LSTM tf.api_implements usually has an attribute like "lstm_abcde91...".
      // TODO(b/147436982): we need to make sure that only the
      // outputs (full sequence) are used, not the last_output and not the new_states.
      // We will discard everything except the outputs;
      // the outputs are in the shape of [batch, time, units].
      if (attr.getValue().starts_with("lstm_")) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto

    message DebuggerConfig {
      // Type of quantization debugger. Depending on the type, inputs and outputs
      // are wired differently.
      // NEXT ID: 4
      enum DebuggerType {
        DEBUGGER_TYPE_UNSPECIFIED = 0;
        // DEBUGGER_TYPE_WHOLE_MODEL creates two tf.SavedModels - an unquantized and
        // a quantized model, with DumpTensor added to the outputs of quantizable layers.
        // The DumpTensor dumps the entire value of its input to a specified file. When
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 14.3K bytes
    - Viewed (0)