- Sort: Score
- Results per page: 10 results
- Languages: All
Results 1 - 10 of 53 for relX (0.12 sec)
-
src/cmd/link/internal/ld/data.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Jun 12 15:10:50 UTC 2024 - 100.5K bytes - Viewed (1) -
src/cmd/internal/obj/ppc64/asm9.go
} rel = obj.Addrel(c.cursym) rel.Off = int32(c.pc) rel.Siz = 8 rel.Sym = s rel.Add = d if c.ctxt.Flag_shared { switch form { case D_FORM: rel.Type = objabi.R_ADDRPOWER_TOCREL case DS_FORM: rel.Type = objabi.R_ADDRPOWER_TOCREL_DS } } else { switch form { case D_FORM: rel.Type = objabi.R_ADDRPOWER case DS_FORM:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 13:55:28 UTC 2024 - 156.1K bytes - Viewed (0) -
src/cmd/internal/obj/arm64/asm7.go
break } rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) rel.Siz = 4 rel.Sym = p.To.Sym rel.Add = p.To.Offset rel.Type = objabi.R_CALLARM64 case 6: /* b ,O(R); bl ,O(R) */ o1 = c.opbrr(p, p.As) o1 |= uint32(p.To.Reg&31) << 5 if p.As == obj.ACALL { rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) rel.Siz = 0 rel.Type = objabi.R_CALLIND
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 15:44:14 UTC 2024 - 201.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
('none', None, False, False, quant_opts_pb2.TF, False, 'SAME'), ('relu', nn_ops.relu, False, False, quant_opts_pb2.TF, False, 'SAME'), ('relu6', nn_ops.relu6, False, False, quant_opts_pb2.TF, False, 'SAME'), ('with_bias', None, True, False, quant_opts_pb2.TF, False, 'SAME'), ( 'with_bias_and_relu', nn_ops.relu, True, False, quant_opts_pb2.TF,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
src/cmd/internal/obj/s390x/asmz.go
c.ctxt.Diag("require symbol to apply relocation") } offset := int64(2) // relocation offset from start of instruction rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc + offset) rel.Siz = 4 rel.Sym = sym rel.Add = add + offset + int64(rel.Siz) rel.Type = objabi.R_PCRELDBL return rel } func (c *ctxtz) addrilrelocoffset(sym *obj.LSym, add, offset int64) *obj.Reloc { if sym == nil {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 16 17:46:09 UTC 2024 - 176.7K bytes - Viewed (0) -
src/cmd/internal/obj/x86/asm6.go
ctxt.Headtype != objabi.Hwindows { rel = obj.Reloc{} rel.Type = objabi.R_TLS_LE rel.Siz = 4 rel.Sym = nil rel.Add = int64(v) v = 0 } if v == 0 && rel.Siz == 0 && base != REG_BP && base != REG_R13 { ab.Put1(byte(0<<6 | reg[base]<<0 | r<<3)) return } if disp8, ok := toDisp8(v, p, ab); ok && rel.Siz == 0 { ab.Put2(byte(1<<6|reg[base]<<0|r<<3), disp8)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 15:44:14 UTC 2024 - 146.9K bytes - Viewed (0) -
src/cmd/go/internal/load/pkg.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 28 17:00:51 UTC 2024 - 120K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
let hasFolder = 1; } def TFL_ReluOp: TFL_Op<"relu", [ PredOpTrait<"x and y must have same element type", TFL_TCresVTEtIsSameAsOp<0, 0>>, Pure, QuantizableResult, SameOperandsAndResultShape]> { let summary = "Relu operator"; let description = [{ Element-wise Relu operator x -> max(0, x) }];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
func.return %0 : tensor<*xf32> } // CHECK-LABEL: testMaximumOfZeroToReluFloat func.func @testMaximumOfZeroToReluFloat(%arg0: tensor<4xf32>) -> tensor<4xf32> { // CHECK: %0 = "tf.Relu"(%arg0) {device = "/job:localhost/replica:0/task:0/device:GPU:0"} : (tensor<4xf32>) -> tensor<4xf32> // CHECK: return %0 %cst_0 = arith.constant dense<0.000000e+00> : tensor<f32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 132.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/ops.mlir
// CHECK: "NONE" %0 = tfl.add %arg0, %arg1 {fused_activation_function = "NONE"} : tensor<4xi32> // CHECK: "RELU" %1 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU"} : tensor<4xi32> // CHECK: "RELU_N1_TO_1" %2 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU_N1_TO_1"} : tensor<4xi32> // CHECK: "RELU6"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 189.2K bytes - Viewed (0)