Results 1 - 10 of 50 for output1 (0.15 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

              inputs={'x1': in_placeholder_1}, outputs={'output1': output_tensor_1}
          )
    
          in_placeholder_2, output_tensor_2 = self._create_simple_tf1_conv_model()
          sig_def_2 = signature_def_utils_impl.predict_signature_def(
              inputs={'x2': in_placeholder_2}, outputs={'output2': output_tensor_2}
          )
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
    - Viewed (0)
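The excerpt above builds two TF1 SignatureDefs with named inputs and outputs via predict_signature_def. A minimal sketch of how such a signature is typically attached to a SavedModel, assuming a stand-in matmul graph rather than the test's conv model and a hypothetical export directory:

    import tensorflow.compat.v1 as tf

    tf.disable_eager_execution()

    # Stand-in graph; the test builds a small conv model instead.
    x1 = tf.placeholder(tf.float32, shape=[None, 4], name='x1')
    w = tf.Variable(tf.ones([4, 2]), name='w')
    output_tensor_1 = tf.matmul(x1, w, name='output1')

    sig_def_1 = tf.saved_model.predict_signature_def(
        inputs={'x1': x1}, outputs={'output1': output_tensor_1}
    )

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        builder = tf.saved_model.Builder('/tmp/sigdef_demo')  # hypothetical path
        builder.add_meta_graph_and_variables(
            sess,
            tags=[tf.saved_model.SERVING],
            signature_def_map={'serving_default': sig_def_1},
        )
        builder.save()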
  2. platforms/software/dependency-management/src/integTest/groovy/org/gradle/integtests/resolve/transform/ArtifactTransformIntegrationTest.groovy

                    void transform(TransformOutputs outputs) {
                        def input = inputArtifact.get().asFile
                        File outputA = outputs.file(input.name + ".A.txt")
                        assert outputA.parentFile.directory && outputA.parentFile.list().length == 0
                        outputA.text = "Output A"
    
                        File outputB = outputs.file(input.name + ".B.txt")
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Tue Nov 07 18:43:42 UTC 2023
    - 100.8K bytes
    - Viewed (0)
  3. platforms/software/dependency-management/src/integTest/groovy/org/gradle/integtests/resolve/transform/ArtifactTransformCachingIntegrationTest.groovy

                    @Override
                    void transform(TransformOutputs outputs) {
                        // Simulate transform leaving file open
                        def output = outputs.file("output.txt")
                        output.createNewFile()
                        TestState.fileKeptOpen = output.newOutputStream()
                        TestState.fileKeptOpen << "output"
                        TestState.fileKeptOpen.flush()
                    }
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Fri Jun 07 11:52:44 UTC 2024
    - 97.8K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/translate/import_model.cc

      std::vector<TensorId> outputs;
      output_node_names.reserve(specs.outputs.size());
      for (const auto& output : specs.outputs) {
        TensorId tensor = ParseTensorName(output);
        auto remapped_it = remapped_feeds_.find(tensor);
        if (remapped_it != remapped_feeds_.end()) {
          output_node_names.insert(remapped_it->second);
          outputs.push_back({remapped_it->second, 0});
        } else {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 183.2K bytes
    - Viewed (0)
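The loop above resolves each requested output into a TensorId, remapping any names that were replaced by feeds. As a hypothetical Python illustration of the 'node_name:output_index' convention that ParseTensorName handles (not TensorFlow's actual parser):

    def parse_tensor_name(name: str) -> tuple[str, int]:
        """Split 'node' or 'node:2' into (node_name, output_index); index defaults to 0."""
        node, _, index = name.partition(':')
        return node, int(index) if index else 0

    assert parse_tensor_name('conv/Relu:1') == ('conv/Relu', 1)
    assert parse_tensor_name('output1') == ('output1', 0)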
  5. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

      let summary = [{While loop}];
    
      let description = [{
        output = input; while (cond(output)) { output = body(output) }
    
        While loop where all values are passed through arguments with implicit
        capture.
    
        input: A list of input tensors whose types are T.
        output: A list of output tensors whose types are T.
        cond: A region that takes 'input' and returns a boolean scalar tensor.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
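The description above reduces to `output = input; while (cond(output)) { output = body(output) }`, with every loop-carried value passed explicitly as an argument. A small Python sketch of the same semantics using tf.while_loop (the counter and limit are made-up example values, not part of the op definition):

    import tensorflow as tf

    def cond(i, acc):
        # 'cond' region: takes the loop-carried values, returns a boolean scalar.
        return i < 5

    def body(i, acc):
        # 'body' region: takes the loop-carried values, returns values of the same types.
        return i + 1, acc * 2.0

    i, acc = tf.while_loop(cond, body, loop_vars=(tf.constant(0), tf.constant(1.0)))
    print(acc.numpy())  # 32.0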
  6. tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td

    }
    
    def ExecutorConvertControlToDataOutputsPass : Pass<"tf-executor-convert-control-to-data-outputs", "ModuleOp"> {
      let summary = "Chain control outputs of while loop body";
    
      let description = [{
        This pass converts the control outputs of a while loop body function to data
        outputs. Thus, inter iteration control dependencies are transformed to
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 99.6K bytes
    - Viewed (0)
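The pass description is cut off above; its point is that control outputs of the while-loop body become explicit data outputs, so inter-iteration ordering is carried by tensors ("chaining") rather than control edges. A rough conceptual analog in Python, not the MLIR pass itself; the variable and loop bound are made up:

    import tensorflow as tf

    v = tf.Variable(0.0)  # hypothetical stateful resource

    @tf.function
    def loop_with_chain(n):
        def cond(i, chain):
            return i < n

        def body(i, chain):
            update = v.assign_add(1.0)  # side effect that must be ordered across iterations
            with tf.control_dependencies([update]):
                # The ordering requirement is now reflected in a data value that the
                # next iteration (and ops after the loop) can depend on.
                chain = tf.identity(chain + 1)
            return i + 1, chain

        return tf.while_loop(cond, body, (tf.constant(0), tf.constant(0)))

    _, chain = loop_with_chain(tf.constant(3))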
  7. src/cmd/compile/internal/ssa/_gen/AMD64Ops.go

    	var (
    		gp01           = regInfo{inputs: nil, outputs: gponly}
    		gp11           = regInfo{inputs: []regMask{gp}, outputs: gponly}
    		gp11sp         = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
    		gp11sb         = regInfo{inputs: []regMask{gpspsbg}, outputs: gponly}
    		gp21           = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
    		gp21sp         = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Aug 04 16:40:24 UTC 2023
    - 98K bytes
    - Viewed (1)
  8. tensorflow/c/c_api.cc

      }
      return outputs;
    }
    
    void TFOutputsFromOutputs(const std::vector<tensorflow::Output>& outputs,
                              TF_Output* tf_outputs) {
      for (int i = 0; i < outputs.size(); i++) {
        tf_outputs[i].oper = ToOperation(outputs[i].node());
        tf_outputs[i].index = outputs[i].index();
      }
    }
    #endif  // !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 03:35:10 UTC 2024
    - 102.3K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/flatbuffer_export.cc

              // node we're updating is the right one. For now we're just ensuring
              // they have the same number of inputs and outputs and that the first
              // output is the same
              if (key_inputs.size() == inputs.size() &&
                  key_outputs.size() == outputs.size() &&
                  key_outputs[0] == outputs[0]) {
                ret &=
                    op->mutate_large_custom_options_offset(custom_op.second.first);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:41:49 UTC 2024
    - 164.5K bytes
    - Viewed (0)
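The comment above describes a heuristic for deciding that a serialized op matches a recorded custom-op entry. Roughly, as a hypothetical Python predicate (the exporter itself is C++):

    def looks_like_same_op(key_inputs, key_outputs, inputs, outputs):
        # Same arity on both sides and an identical first output tensor.
        return (len(key_inputs) == len(inputs)
                and len(key_outputs) == len(outputs)
                and key_outputs[0] == outputs[0])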
  10. tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc

    // `tfl.batch_matmul` when it accepts uniform quantized tensors.
    //
    // StableHLO Quantizer output:
    //   * input: per-tensor qi8
    //   * filter: per-channel qi8 for non-batching op, per-tensor for batching op.
    //   * output: per-tensor qi32
    // JAX Quantizer output:
    //   * input: per-tensor qi8
    //   * filter: per-channel qi8
    //   * output: per-tensor qi8
    //
    // Conditions for the `tfl.batch_matmul` conversion:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 09:00:19 UTC 2024
    - 99.8K bytes
    - Viewed (0)
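The comment above distinguishes per-tensor and per-channel qi8 quantization of the `tfl.batch_matmul` operands. A NumPy sketch of what those two schemes mean; the scales, channel axis, and shapes are illustrative only, not what either quantizer emits:

    import numpy as np

    def quantize_per_tensor(x, scale, zero_point=0):
        # One scale/zero point for the whole tensor (e.g. the qi8 input).
        q = np.round(x / scale) + zero_point
        return np.clip(q, -128, 127).astype(np.int8)

    def quantize_per_channel(w, scales):
        # One scale per output channel (axis 0 here), e.g. the qi8 filter.
        scales = np.asarray(scales).reshape(-1, *([1] * (w.ndim - 1)))
        q = np.round(w / scales)
        return np.clip(q, -128, 127).astype(np.int8)

    w = np.random.randn(8, 16).astype(np.float32)
    qw = quantize_per_channel(w, scales=np.abs(w).max(axis=1) / 127.0)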