Results 21 - 30 of 119 for relu (0.06 sec)

  1. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-prefer-tf2xla.mlir

        data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 9.99999974E-5 : f32, explicit_paddings = [], filter_format = "HWIO", fused_ops = ["BiasAdd", "Relu"], leakyrelu_alpha = 2.000000e-01 : f32, num_args = 2 : i64, operandSegmentSizes = array<i32: 1, 1, 2, 2>, padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 15.8K bytes
    - Viewed (0)
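    The attribute dump above describes a `_FusedConv2D` whose epilogue folds a bias add and a ReLU into the convolution (`fused_ops = ["BiasAdd", "Relu"]`); the `leakyrelu_alpha` attribute only matters when the fused activation is `LeakyRelu`. A minimal NumPy sketch of that epilogue, assuming the conventional BiasAdd/ReLU semantics (the helper name is illustrative):

    ```python
    import numpy as np

    def fused_epilogue(conv_out, bias, fused_ops, leakyrelu_alpha=0.2):
        """Apply the fused_ops epilogue to an NHWC convolution result."""
        y = conv_out
        if "BiasAdd" in fused_ops:
            y = y + bias  # bias broadcasts over the trailing channel axis
        if "Relu" in fused_ops:
            y = np.maximum(y, 0.0)
        elif "LeakyRelu" in fused_ops:
            y = np.where(y >= 0.0, y, leakyrelu_alpha * y)
        return y

    y = fused_epilogue(np.random.randn(1, 4, 4, 2), np.array([0.1, -0.1]),
                       fused_ops=["BiasAdd", "Relu"])
    ```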
  2. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

          padding: str = 'SAME',
          has_func_alias: bool = False,
      ) -> module.Module:
        class ConvModel(module.Module):
          """A simple model with a single conv2d, bias and relu."""
    
          def __init__(self):
            self.out_channel_size = filter_shape[-1]
    
            # This ensures filters will have different value range per out channel
            self.filters = np.stack(
                [
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
    - Viewed (0)
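    The excerpt breaks off at the `np.stack` call, so the following is an illustrative sketch only, not the file's actual contents: one way to give each output channel a distinct filter value range, as the comment describes, is to scale one random filter slice per channel and stack them on the last (HWIO output-channel) axis:

    ```python
    import numpy as np

    filter_shape = (2, 3, 3, 2)  # HWIO; hypothetical shape for illustration
    out_channel_size = filter_shape[-1]

    # One slice per output channel, each drawn from a wider range than the
    # last, stacked on the output-channel axis.
    filters = np.stack(
        [
            np.random.uniform(low=-(i + 1), high=i + 1, size=filter_shape[:-1])
            for i in range(out_channel_size)
        ],
        axis=-1,
    )
    assert filters.shape == filter_shape
    ```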
  3. tensorflow/compiler/mlir/tfr/ir/tfr_ops.td

       range for the fused activation `act` with the quantization defined by the
       `scale` and `zero point`. Currently, the allowed activations are
       `NONE`, `RELU`, `RELU6` and `RELU_N1_TO_1`.
    
        Example:
    
        ```mlir
        %3, %4 = tfr.quant_act_range(%2, %1, %0) :
            (tfr.attr, float, i64) -> (tfr.tensor, tfr.tensor)
        ```
      }];
    
      let arguments = (ins
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 22 10:54:29 UTC 2024
    - 17.4K bytes
    - Viewed (0)
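    The documented op returns the clamp range, in the quantized domain, for a fused activation under a given `scale` and `zero point`. A sketch of that arithmetic for the listed activations, assuming the usual affine mapping `real = scale * (quantized - zero_point)` for signed 8-bit values (function name and defaults are illustrative):

    ```python
    def quant_act_range(act, scale, zero_point, qmin=-128, qmax=127):
        """Clamp bounds in the quantized domain for a fused activation."""
        def quantize(real):
            q = zero_point + round(real / scale)
            return max(qmin, min(qmax, q))

        if act == "NONE":
            return qmin, qmax
        if act == "RELU":          # real range [0, +inf)
            return quantize(0.0), qmax
        if act == "RELU6":         # real range [0, 6]
            return quantize(0.0), quantize(6.0)
        if act == "RELU_N1_TO_1":  # real range [-1, 1]
            return quantize(-1.0), quantize(1.0)
        raise ValueError(f"unsupported activation: {act}")

    print(quant_act_range("RELU6", scale=0.05, zero_point=-10))  # (-10, 110)
    ```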
  4. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td

                   (BinBroadcastDimensions $one, $features))))>;
    
    //===----------------------------------------------------------------------===//
    // Relu op patterns.
    //===----------------------------------------------------------------------===//
    
    // TODO(hinsu): Make these patterns to TF to TF lowering. Relu6 lowering will
    // require HLO canonicalization of min and max on a tensor to ClampOp.
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 34.8K bytes
    - Viewed (0)
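    The TODO above concerns lowering `Relu` and `Relu6`; mathematically the two reduce to a max and a clamp (the min-of-max form the comment says should canonicalize to a single ClampOp), which a short sketch makes concrete:

    ```python
    import numpy as np

    def relu(x):
        # Relu(x) lowers to max(x, 0)
        return np.maximum(x, 0.0)

    def relu6(x):
        # Relu6(x) lowers to clamp(x, 0, 6), i.e. min(max(x, 0), 6)
        return np.clip(x, 0.0, 6.0)

    x = np.array([-3.0, 2.0, 7.5])
    print(relu(x))   # [0.  2.  7.5]
    print(relu6(x))  # [0.  2.  6. ]
    ```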
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir

        %1 = "tf.Relu"(%0) {device = ""} : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xf32>
        return %1 : tensor<1x3x2x2xf32>
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 25.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/flatbuffer_operator.cc

      return llvm::StringSwitch<tflite::ActivationFunctionType>(str)
          .Case("NONE", tflite::ActivationFunctionType_NONE)
          .Case("RELU", tflite::ActivationFunctionType_RELU)
          .Case("RELU_N1_TO_1", tflite::ActivationFunctionType_RELU_N1_TO_1)
          .Case("RELU6", tflite::ActivationFunctionType_RELU6)
          .Case("TANH", tflite::ActivationFunctionType_TANH)
          .Case("SIGN_BIT", tflite::ActivationFunctionType_SIGN_BIT);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 38K bytes
    - Viewed (0)
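    The `StringSwitch` above maps activation names onto TFLite's `ActivationFunctionType` enum. As a sketch, the same lookup in Python is a dict from name to the corresponding elementwise function (following the standard TFLite activation semantics; `SIGN_BIT` is omitted since it is not an elementwise clamp):

    ```python
    import numpy as np

    # Name -> function, mirroring the StringSwitch cases above.
    ACTIVATIONS = {
        "NONE": lambda x: x,
        "RELU": lambda x: np.maximum(x, 0.0),
        "RELU_N1_TO_1": lambda x: np.clip(x, -1.0, 1.0),
        "RELU6": lambda x: np.clip(x, 0.0, 6.0),
        "TANH": np.tanh,
    }

    print(ACTIVATIONS["RELU_N1_TO_1"](np.array([-2.0, 0.5, 3.0])))  # [-1.  0.5 1.]
    ```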
  7. src/debug/elf/file.go

    				continue
    			}
    			val64 := sym.Value + uint64(rela.Addend)
    			f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64)
    		case R_X86_64_32:
    			if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 {
    				continue
    			}
    			val32 := uint32(sym.Value) + uint32(rela.Addend)
    			f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32)
    		}
    	}
    
    	return nil
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 23 16:49:58 UTC 2024
    - 43.1K bytes
    - Viewed (0)
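    The Go excerpt applies `R_X86_64_64` and `R_X86_64_32` RELA relocations by writing `symbol value + addend` into the section bytes at the relocation offset. A Python sketch of the same patching arithmetic (the bounds and negative-addend checks mirror the excerpt; the helper name is illustrative):

    ```python
    import struct

    def apply_rela(dst, off, sym_value, addend, size, little_endian=True):
        """Patch sym_value + addend into dst at off (8- or 4-byte slot)."""
        if off + size > len(dst) or addend < 0:
            return  # skip out-of-range or negative-addend entries, as above
        fmt = ("<" if little_endian else ">") + ("Q" if size == 8 else "I")
        val = (sym_value + addend) & ((1 << (8 * size)) - 1)
        struct.pack_into(fmt, dst, off, val)

    section = bytearray(16)
    apply_rela(section, 0, sym_value=0x401000, addend=8, size=8)
    print(section.hex())  # 08104000000000000000000000000000
    ```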
  8. src/cmd/link/internal/arm64/asm.go

    	o(0x90000000)
    	o(0x91000000)
    	rel, _ := initfunc.AddRel(objabi.R_ADDRARM64)
    	rel.SetOff(0)
    	rel.SetSiz(8)
    	rel.SetSym(ctxt.Moduledata)
    
    	// 8:	14000000 	b	0 <runtime.addmoduledata>
    	// 	8: R_AARCH64_CALL26	runtime.addmoduledata
    	o(0x14000000)
    	rel2, _ := initfunc.AddRel(objabi.R_CALLARM64)
    	rel2.SetOff(8)
    	rel2.SetSiz(4)
    	rel2.SetSym(addmoduledata)
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jan 30 20:09:45 UTC 2024
    - 47K bytes
    - Viewed (0)
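    The stub above emits an `adrp`/`add` pair (patched together by `R_ADDRARM64`) followed by a `b` (patched by `R_CALLARM64`). The pair works because `adrp` supplies the target's 4 KiB page base relative to the current page and `add` supplies the low 12 bits; a sketch of that address split, with hypothetical addresses:

    ```python
    def split_addrarm64(target, pc):
        """Split a target address the way an adrp/add pair consumes it."""
        page_delta = (target >> 12) - (pc >> 12)  # adrp immediate, in pages
        low12 = target & 0xFFF                    # add immediate
        return page_delta, low12

    def rejoin(page_delta, low12, pc):
        return (((pc >> 12) + page_delta) << 12) + low12

    pc, target = 0x400010, 0x4A3F28
    pd, lo = split_addrarm64(target, pc)
    assert rejoin(pd, lo, pc) == target
    ```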
  9. src/cmd/link/internal/amd64/asm.go

    			// these fields in the 'reloc' phase.
    			rela := ldr.MakeSymbolUpdater(syms.Rela)
    			rela.AddAddrPlus(target.Arch, s, int64(r.Off()))
    			if r.Siz() == 8 {
    				rela.AddUint64(target.Arch, elf.R_INFO(0, uint32(elf.R_X86_64_RELATIVE)))
    			} else {
    				ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
    			}
    			rela.AddAddrPlus(target.Arch, targ, int64(r.Add()))
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Aug 23 05:58:20 UTC 2023
    - 21K bytes
    - Viewed (0)
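    Each ELF64 RELA entry the snippet builds is three 8-byte words (offset, info, addend), with `elf.R_INFO` packing the symbol index into the high 32 bits of the info word and the relocation type into the low 32. A sketch of that packing (addresses are hypothetical; `R_X86_64_RELATIVE` is the standard type number 8):

    ```python
    import struct

    R_X86_64_RELATIVE = 8

    def r_info(sym_index, rel_type):
        """ELF64 r_info word: symbol index in the high half, type in the low."""
        return (sym_index << 32) | rel_type

    def rela_entry(offset, sym_index, rel_type, addend):
        # Elf64_Rela: r_offset, r_info unsigned; r_addend signed.
        return struct.pack("<QQq", offset, r_info(sym_index, rel_type), addend)

    entry = rela_entry(0x1000, 0, R_X86_64_RELATIVE, 0x401234)
    assert len(entry) == 24  # Elf64_Rela is 24 bytes
    ```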
  10. src/cmd/link/internal/s390x/asm.go

    		//.plt index
    		plt.AddUint32(target.Arch, uint32(rela.Size())) // rela size before current entry
    
    		// rela
    		rela.AddAddrPlus(target.Arch, got.Sym(), got.Size()-8)
    
    		sDynid := ldr.SymDynid(s)
    		rela.AddUint64(target.Arch, elf.R_INFO(uint32(sDynid), uint32(elf.R_390_JMP_SLOT)))
    		rela.AddUint64(target.Arch, 0)
    
    		ldr.SetPlt(s, int32(plt.Size()-32))
    
    	} else {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Aug 23 05:58:20 UTC 2023
    - 13.7K bytes
    - Viewed (0)