Results 1 - 10 of 3,790 for FusedN (0.13 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc

              contraction,
              "fused operation must be nested inside a function, If or While");
        }
    
        // If the contraction is used in multiple places, fusing it will only create
        // more contraction nodes, which is slower.
        if (!contraction.getResult().hasOneUse())
          return rewriter.notifyMatchFailure(contraction,
                                             "result is used by multiple ops");
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.9K bytes
    - Viewed (0)
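
The guard shown in result 1 is a standard MLIR rewrite-pattern idiom: bail out with notifyMatchFailure before mutating anything, because a contraction that feeds several consumers would have to be duplicated into every fused op. A minimal self-contained sketch of that shape (the root op name "tf.Conv2D" is only an illustrative stand-in, not taken from the snippet):

    #include "mlir/IR/PatternMatch.h"

    namespace {
    using namespace mlir;

    struct FuseContractionSketch : public RewritePattern {
      explicit FuseContractionSketch(MLIRContext *ctx)
          : RewritePattern("tf.Conv2D" /* illustrative root op */,
                           /*benefit=*/1, ctx) {}

      LogicalResult matchAndRewrite(Operation *contraction,
                                    PatternRewriter &rewriter) const override {
        // Same reasoning as fused_kernel_matcher.cc: fusing a multi-use
        // contraction adds contraction nodes instead of removing them,
        // so the match is rejected up front.
        if (!contraction->getResult(0).hasOneUse())
          return rewriter.notifyMatchFailure(contraction,
                                             "result is used by multiple ops");
        // ... build the fused op and replace `contraction` here ...
        return failure();
      }
    };
    }  // namespace
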
  2. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h

      // DT_FLOAT, DT_HALF, DT_QINT8, and DT_QUINT8. When DT_HALF is used, the
      // `weight_quantization` flag needs to be set to true. When DT_QUINT8 is
      // used, the `weight_quantization` flag needs to be set to false.
      tensorflow::DataType inference_type = tensorflow::DT_FLOAT;
    
      // The input and output data type during inference. This flag is only used
      // when `inference_type` is different from DT_FLOAT. This flag can only be set
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 10:16:19 UTC 2024
    - 10.8K bytes
    - Viewed (0)
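
The header comment in result 2 couples the inference type to the weight_quantization flag. A minimal sketch of that pairing, assuming a QuantizationSpecs-like struct reduced to just the two members the comment names (the real struct has many more fields):

    #include "tensorflow/core/framework/types.pb.h"

    // Hypothetical reduction of the config struct to the two fields above.
    struct QuantizationSpecsSketch {
      tensorflow::DataType inference_type = tensorflow::DT_FLOAT;
      bool weight_quantization = false;
    };

    QuantizationSpecsSketch MakeHalfSpecs() {
      QuantizationSpecsSketch specs;
      specs.inference_type = tensorflow::DT_HALF;
      // Per the comment: DT_HALF is only legal for weight-only
      // quantization, so the flag must be set to true.
      specs.weight_quantization = true;
      return specs;
    }
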
  3. src/vendor/golang.org/x/sys/cpu/cpu.go

    	HasAVX512IFMA       bool // Advanced vector extension 512 Integer Fused Multiply Add
    	HasAVX512VBMI       bool // Advanced vector extension 512 Vector Byte Manipulation Instructions
    	HasAVX5124VNNIW     bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision
    	HasAVX5124FMAPS     bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 16:12:58 UTC 2024
    - 12.1K bytes
    - Viewed (0)
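
The Go package in result 3 surfaces raw CPUID feature bits as booleans such as HasAVX512IFMA. For comparison, the usual way to make the same runtime check from C++ is the GCC/Clang builtin __builtin_cpu_supports; a sketch (x86-only, and the set of accepted feature names depends on the compiler version):

    #include <cstdio>

    int main() {
      // Runtime CPUID check, analogous to reading cpu.X86.HasAVX512IFMA
      // from golang.org/x/sys/cpu.
      if (__builtin_cpu_supports("avx512ifma"))
        std::puts("AVX-512 integer fused multiply-add available");
      else
        std::puts("AVX-512 IFMA not available");
    }
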
  4. tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc

        // TFLite fused embedding_lookup op.
        ConvertEmbeddedLookupFunc convert_embedded_lookup(func);
        if (failed(convert_embedded_lookup.VerifySignature())) return;
        func.eraseBody();
        func.addEntryBlock();
        convert_embedded_lookup.RewriteFunc();
      } else if (attr.getValue() == mlir::TFL::kLstmCellSimple) {
        // Check if the lstm cell simple can be fused; if not, we just don't
        // do anything.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
    - Viewed (0)
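
The eraseBody/addEntryBlock pair in result 4 is the usual MLIR recipe for rewriting a function in place: keep the signature, discard the old body, and rebuild it from scratch. A bare-bones sketch of that skeleton, assuming a func-dialect FuncOp and omitting the embedding-lookup specifics:

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/Builders.h"

    void RewriteBodyInPlace(mlir::func::FuncOp func) {
      func.eraseBody();      // drop every block of the old body
      func.addEntryBlock();  // fresh entry block, args match the signature
      mlir::OpBuilder builder(&func.getBody());
      // ... create the fused op(s) and a terminating func.return here ...
    }
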
  5. tensorflow/compiler/mlir/lite/transforms/passes.td

                     "comma separated list of allowlisted functions to be quantized. Only used in tests">,
          Option<"quantize_signed_", "quantize-signed", "bool", "false",
                 "signed inference type. Only used in tests">,
          Option<"activation_number_of_bits_", "activation-number-of-bits", "int", "8",
                 "number of bits for inference type. Only used in tests">,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 22.6K bytes
    - Viewed (0)
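
Each Option<> entry in result 5's TableGen file expands to a command-line-settable Pass::Option member in the generated C++. A hand-written equivalent looks roughly like this (the pass class and its argument string are hypothetical; only the two option declarations mirror the snippet):

    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/Pass/Pass.h"

    struct QuantizeSketchPass
        : public mlir::PassWrapper<QuantizeSketchPass,
                                   mlir::OperationPass<mlir::ModuleOp>> {
      Option<bool> quantize_signed_{
          *this, "quantize-signed",
          llvm::cl::desc("signed inference type. Only used in tests"),
          llvm::cl::init(false)};
      Option<int> activation_number_of_bits_{
          *this, "activation-number-of-bits",
          llvm::cl::desc("number of bits for inference type. Only used in tests"),
          llvm::cl::init(8)};

      llvm::StringRef getArgument() const override { return "sketch-quantize"; }
      void runOnOperation() override { /* quantization logic elided */ }
    };
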
  6. test/codegen/floats.go

    	return b0[idx] * b1
    }
    
    func indexStore(b0 []float64, b1 float64, idx int) {
    	// arm64:`FMOVD\sF[0-9]+,\s\(R[0-9]+\)\(R[0-9]+<<3\)`
    	b0[idx] = b1
    }
    
    // ----------- //
    //    Fused    //
    // ----------- //
    
    func FusedAdd32(x, y, z float32) float32 {
    	// s390x:"FMADDS\t"
    	// ppc64x:"FMADDS\t"
    	// arm64:"FMADDS"
    	// riscv64:"FMADDS\t"
    	return x*y + z
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 04 15:24:29 UTC 2024
    - 4.9K bytes
    - Viewed (0)
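
The codegen test in result 6 asserts that x*y + z compiles to a single fused multiply-add instruction (FMADDS and friends), meaning one rounding step instead of two. The numerical difference is easy to demonstrate with std::fmaf; build with -ffp-contract=off so the compiler does not fuse the plain expression on its own:

    #include <cmath>
    #include <cstdio>

    int main() {
      float x = 1.0f + 0x1p-12f;     // 1 + 2^-12, exactly representable
      float z = -(1.0f + 0x1p-11f);  // -(1 + 2^-11)
      // x*x is exactly 1 + 2^-11 + 2^-24; rounding that product to float
      // drops the 2^-24 term, so the two-rounding form cancels to 0.
      float separate = x * x + z;
      // fmaf rounds once, after the exact multiply-add: the 2^-24 survives.
      float fused = std::fmaf(x, x, z);
      std::printf("separate = %g, fused = %g\n", separate, fused);
    }
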
  7. platforms/core-runtime/BYTECODE-INTERCEPTION-README.md

        metadata("1. Artifact metadata: hash, name\n2. Dependencies instrumented type hierarchy")
        result("1. Transformed jar\n2. hash of original jar")
        buildService1[["Build service 1:\nused to provide type hierarchies of \ndependencies"]]
        buildService2[["Build service 2:\nused to get original Jar\nvia hash from original classpath"]]
        jar --> collect --> artifactOutput --> merge --> metadata --> instrument --> result
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Thu Apr 18 09:22:58 UTC 2024
    - 22.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td

        "mlir::stablehlo::StablehloDialect",
      ];
    }
    
    def UnwrapXlaCallModuleOpPass : Pass<"stablehlo-unwrap-xla-call-module-op", "ModuleOp"> {
      let summary = "Unwrap XlaCallModuleOps into inline functions if not used for quantizing fused patterns.";
      let dependentDialects = ["TF::TensorFlowDialect"];
    }
    
    def ConvertFuncToBfloat16Pass : Pass<"stablehlo-convert-func-to-bfloat16", "mlir::func::FuncOp"> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 10.3K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc

    <{host_mlir_module = "#loc1 = loc(\22Reshape:\22)\0A#loc2 = loc(\22Reshape_4\22)\0A#loc3 = loc(\22Reshape\22)\0A#loc9 = loc(fused[#loc1, #loc2, #loc3])\0Amodule {\0A  func.func @host_func(%arg0: tensor<3360x?xi32> loc(fused[#loc1, #loc2, #loc3]), %arg1: tensor<3xi32> loc(fused[#loc1, #loc2, #loc3]), %arg2: tensor<i32> loc(fused[#loc1, #loc2, #loc3]), %arg3: tensor<2xi32> loc(fused[#loc1, #loc2, #loc3])) -> (tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1120x?xi32>, tensor<2xi32>) {\0A    %0 = \...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 08:08:57 UTC 2024
    - 11.7K bytes
    - Viewed (0)
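
Unescaped, the host_mlir_module string in result 9 begins with location definitions such as #loc9 = loc(fused[#loc1, #loc2, #loc3]): a single MLIR location pointing at several source ops at once, which is exactly what fusion produces. Building such a location through the C++ API looks roughly like this (a sketch; the exact FusedLoc::get overloads vary a little across MLIR revisions):

    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/Location.h"
    #include "mlir/IR/MLIRContext.h"

    mlir::Location MakeFusedReshapeLoc(mlir::MLIRContext &ctx) {
      auto loc1 = mlir::NameLoc::get(mlir::StringAttr::get(&ctx, "Reshape:"));
      auto loc2 = mlir::NameLoc::get(mlir::StringAttr::get(&ctx, "Reshape_4"));
      auto loc3 = mlir::NameLoc::get(mlir::StringAttr::get(&ctx, "Reshape"));
      // Prints as loc(fused["Reshape:", "Reshape_4", "Reshape"]).
      return mlir::FusedLoc::get(&ctx, {loc1, loc2, loc3});
    }
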
  10. tensorflow/compiler/mlir/tfr/passes/raise_to_tf.cc

        auto input_tfr_type =
            signature.getFunctionType().getInputs()[operand.index()];
    
        // There are three cases for the preceding input_op:
    
        // 1. The preceding op can be a tfr.cast op, which will be fused into
        // the current op, so the resulting op has an input with tensor type.
        if (auto cast_op = dyn_cast_or_null<CastOp>(input_op)) {
          Value input_to_cast = CastToNonDerivedType(rewriter, call_op.getLoc(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 21.8K bytes
    - Viewed (0)
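
The dyn_cast_or_null in result 10's case 1 is LLVM's null-tolerant downcast: it returns null when the input is null or is not the requested type, so a single if covers both "no defining op" and "defining op is not a tfr.cast". A self-contained sketch of the idiom, with hypothetical Base/Derived types standing in for Operation/CastOp:

    #include "llvm/Support/Casting.h"

    struct Base {
      enum Kind { kBase, kDerived } kind = kBase;
    };
    struct Derived : Base {
      Derived() { kind = kDerived; }
      // Hook into LLVM-style RTTI so dyn_cast_or_null can test the type.
      static bool classof(const Base *b) { return b->kind == kDerived; }
    };

    const Derived *AsDerived(const Base *maybe_null) {
      // nullptr in, nullptr out; wrong dynamic type in, nullptr out.
      return llvm::dyn_cast_or_null<Derived>(maybe_null);
    }
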