Path: blob/next/external/cache/sources/wl/include/linuxver.h
/*
 * Linux-specific abstractions to gain some independence from linux kernel versions.
 * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
 *
 * $Copyright Open Broadcom Corporation$
 *
 * $Id: linuxver.h 372519 2012-12-04 01:21:16Z $
 */

#ifndef _linuxver_h_
#define _linuxver_h_

#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#include <linux/config.h>
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33))
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
#include <linux/kconfig.h>
#endif

#include <linux/module.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))

#ifdef __UNDEF_NO_VERSION__
#undef __NO_VERSION__
#else
#define __NO_VERSION__
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
#define module_param_string(_name_, _string_, _size_, _perm_) \
    MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
#include <linux/malloc.h>
#else
#include <linux/slab.h>
#endif

#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
#undef IP_TOS
#endif
#include <asm/io.h>

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
#include <linux/workqueue.h>
#else
#include <linux/tqueue.h>
#ifndef work_struct
#define work_struct tq_struct
#endif
#ifndef INIT_WORK
#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
#endif
#ifndef schedule_work
#define schedule_work(_work) schedule_task((_work))
#endif
#ifndef flush_scheduled_work
#define flush_scheduled_work() flush_scheduled_tasks()
#endif
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define DAEMONIZE(a) daemonize(a); \
    allow_signal(SIGKILL); \
    allow_signal(SIGTERM);
#else
#define RAISE_RX_SOFTIRQ() \
    cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
#define DAEMONIZE(a) daemonize(); \
    do { if (a) \
        strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
    } while (0);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func)
#else
#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work)
#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
    (RHEL_MAJOR == 5))

typedef void (*work_func_t)(void *work);
#endif
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))

#ifndef IRQ_NONE
typedef void irqreturn_t;
#define IRQ_NONE
#define IRQ_HANDLED
#define IRQ_RETVAL(x)
#endif
#else
typedef irqreturn_t (*FN_ISR)(int irq, void *dev_id, struct pt_regs *ptregs);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#define IRQF_SHARED SA_SHIRQ
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
#ifdef CONFIG_NET_RADIO
#define CONFIG_WIRELESS_EXT
#endif
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
#include <linux/sched.h>
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <net/lib80211.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <linux/ieee80211.h>
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
#include <net/ieee80211.h>
#endif
#endif

#ifdef CUSTOMER_HW4
#include <linux/kthread.h>
#endif

#ifndef __exit
#define __exit
#endif
#ifndef __devexit
#define __devexit
#endif
#ifndef __devinit
#define __devinit __init
#endif
#ifndef __devinitdata
#define __devinitdata
#endif
#ifndef __devexit_p
#define __devexit_p(x) x
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))

#define pci_get_drvdata(dev) (dev)->sysdata
#define pci_set_drvdata(dev, value) (dev)->sysdata = (value)

struct pci_device_id {
    unsigned int vendor, device;
    unsigned int subvendor, subdevice;
    unsigned int class, class_mask;
    unsigned long driver_data;
};

struct pci_driver {
    struct list_head node;
    char *name;
    const struct pci_device_id *id_table;
    int (*probe)(struct pci_dev *dev,
                 const struct pci_device_id *id);
    void (*remove)(struct pci_dev *dev);
    void (*suspend)(struct pci_dev *dev);
    void (*resume)(struct pci_dev *dev);
};

#define MODULE_DEVICE_TABLE(type, name)
#define PCI_ANY_ID (~0)

#define pci_module_init pci_register_driver
extern int pci_register_driver(struct pci_driver *drv);
extern void pci_unregister_driver(struct pci_driver *drv);

#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
#define pci_module_init pci_register_driver
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
#ifdef MODULE
#define module_init(x) int init_module(void) { return x(); }
#define module_exit(x) void cleanup_module(void) { x(); }
#else
#define module_init(x) __initcall(x);
#define module_exit(x) __exitcall(x);
#endif
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
#define WL_USE_NETDEV_OPS
#else
#undef WL_USE_NETDEV_OPS
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
#define WL_CONFIG_RFKILL
#else
#undef WL_CONFIG_RFKILL
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
#define list_for_each(pos, head) \
    for (pos = (head)->next; pos != (head); pos = pos->next)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)])
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
#define pci_enable_device(dev) do { } while (0)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
#define net_device device
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))

#ifndef PCI_DMA_TODEVICE
#define PCI_DMA_TODEVICE 1
#define PCI_DMA_FROMDEVICE 2
#endif

typedef u32 dma_addr_t;

static inline int get_order(unsigned long size)
{
    int order;

    size = (size-1) >> (PAGE_SHIFT-1);
    order = -1;
    do {
        size >>= 1;
        order++;
    } while (size);
    return order;
}

static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                                         dma_addr_t *dma_handle)
{
    void *ret;
    int gfp = GFP_ATOMIC | GFP_DMA;

    ret = (void *)__get_free_pages(gfp, get_order(size));

    if (ret != NULL) {
        memset(ret, 0, size);
        *dma_handle = virt_to_bus(ret);
    }
    return ret;
}

static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle)
{
    free_pages((unsigned long)vaddr, get_order(size));
}
#define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
#define pci_unmap_single(cookie, address, size, dir)

#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))

#define dev_kfree_skb_any(a) dev_kfree_skb(a)
#define netif_down(dev) do { (dev)->start = 0; } while (0)

#ifndef _COMPAT_NETDEVICE_H

#define dev_kfree_skb_irq(a) dev_kfree_skb(a)
#define netif_wake_queue(dev) \
    do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy)

static inline void netif_start_queue(struct net_device *dev)
{
    dev->tbusy = 0;
    dev->interrupt = 0;
    dev->start = 1;
}

#define netif_queue_stopped(dev) (dev)->tbusy
#define netif_running(dev) (dev)->start

#endif

#define netif_device_attach(dev) netif_start_queue(dev)
#define netif_device_detach(dev) netif_stop_queue(dev)

#define tasklet_struct tq_struct
static inline void tasklet_schedule(struct tasklet_struct *tasklet)
{
    queue_task(tasklet, &tq_immediate);
    mark_bh(IMMEDIATE_BH);
}

static inline void tasklet_init(struct tasklet_struct *tasklet,
                                void (*func)(unsigned long),
                                unsigned long data)
{
    tasklet->next = NULL;
    tasklet->sync = 0;
    tasklet->routine = (void (*)(void *))func;
    tasklet->data = (void *)data;
}
#define tasklet_kill(tasklet) { do {} while (0); }

#define del_timer_sync(timer) del_timer(timer)

#else

#define netif_down(dev)

#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))

#define PREPARE_TQUEUE(_tq, _routine, _data) \
    do { \
        (_tq)->routine = _routine; \
        (_tq)->data = _data; \
    } while (0)

#define INIT_TQUEUE(_tq, _routine, _data) \
    do { \
        INIT_LIST_HEAD(&(_tq)->list); \
        (_tq)->sync = 0; \
        PREPARE_TQUEUE((_tq), (_routine), (_data)); \
    } while (0)

#endif

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
#define PCI_SAVE_STATE(a, b) pci_save_state(a)
#define PCI_RESTORE_STATE(a, b) pci_restore_state(a)
#else
#define PCI_SAVE_STATE(a, b) pci_save_state(a, b)
#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
static inline int
pci_save_state(struct pci_dev *dev, u32 *buffer)
{
    int i;
    if (buffer) {
        for (i = 0; i < 16; i++)
            pci_read_config_dword(dev, i * 4, &buffer[i]);
    }
    return 0;
}

static inline int
pci_restore_state(struct pci_dev *dev, u32 *buffer)
{
    int i;

    if (buffer) {
        for (i = 0; i < 16; i++)
            pci_write_config_dword(dev, i * 4, buffer[i]);
    }
    else {
        for (i = 0; i < 6; i++)
            pci_write_config_dword(dev,
                                   PCI_BASE_ADDRESS_0 + (i * 4),
                                   pci_resource_start(dev, i));
        pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
    }
    return 0;
}
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev) do {} while (0)
#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
#else
#define OLD_MOD_INC_USE_COUNT do {} while (0)
#define OLD_MOD_DEC_USE_COUNT do {} while (0)
#endif
#else
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev) do {} while (0)
#endif
#ifndef MOD_INC_USE_COUNT
#define MOD_INC_USE_COUNT do {} while (0)
#endif
#ifndef MOD_DEC_USE_COUNT
#define MOD_DEC_USE_COUNT do {} while (0)
#endif
#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
#endif

#ifndef SET_NETDEV_DEV
#define SET_NETDEV_DEV(net, pdev) do {} while (0)
#endif

#ifndef HAVE_FREE_NETDEV
#define free_netdev(dev) kfree(dev)
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))

#define af_packet_priv data
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
#define DRV_SUSPEND_STATE_TYPE pm_message_t
#else
#define DRV_SUSPEND_STATE_TYPE uint32
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define CHECKSUM_HW CHECKSUM_PARTIAL
#endif

typedef struct {
    void *parent;
    struct task_struct *p_task;
    long thr_pid;
    int prio;
    struct semaphore sema;
    int terminated;
    struct completion completed;
} tsk_ctl_t;

#ifdef DHD_DEBUG
#define DBG_THR(x) printk x
#else
#define DBG_THR(x)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
#else
#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
#endif

#define PROC_START(thread_func, owner, tsk_ctl, flags) \
{ \
    sema_init(&((tsk_ctl)->sema), 0); \
    init_completion(&((tsk_ctl)->completed)); \
    (tsk_ctl)->parent = owner; \
    (tsk_ctl)->terminated = FALSE; \
    (tsk_ctl)->thr_pid = kernel_thread(thread_func, tsk_ctl, flags); \
    if ((tsk_ctl)->thr_pid > 0) \
        wait_for_completion(&((tsk_ctl)->completed)); \
    DBG_THR(("%s thr:%lx started\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
}

#ifdef USE_KTHREAD_API
#define PROC_START2(thread_func, owner, tsk_ctl, flags, name) \
{ \
    sema_init(&((tsk_ctl)->sema), 0); \
    init_completion(&((tsk_ctl)->completed)); \
    (tsk_ctl)->parent = owner; \
    (tsk_ctl)->terminated = FALSE; \
    (tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \
    (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
    DBG_THR(("%s thr:%lx created\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
}
#endif

#define PROC_STOP(tsk_ctl) \
{ \
    (tsk_ctl)->terminated = TRUE; \
    smp_wmb(); \
    up(&((tsk_ctl)->sema)); \
    wait_for_completion(&((tsk_ctl)->completed)); \
    DBG_THR(("%s thr:%lx terminated OK\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \
    (tsk_ctl)->thr_pid = -1; \
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
#define KILL_PROC(nr, sig) \
{ \
    struct task_struct *tsk; \
    struct pid *pid; \
    pid = find_get_pid((pid_t)nr); \
    tsk = pid_task(pid, PIDTYPE_PID); \
    if (tsk) send_sig(sig, tsk, 1); \
}
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
    KERNEL_VERSION(2, 6, 30))
#define KILL_PROC(pid, sig) \
{ \
    struct task_struct *tsk; \
    tsk = find_task_by_vpid(pid); \
    if (tsk) send_sig(sig, tsk, 1); \
}
#else
#define KILL_PROC(pid, sig) \
{ \
    kill_proc(pid, sig, 1); \
}
#endif
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#include <linux/time.h>
#include <linux/wait.h>
#else
#include <linux/sched.h>

#define __wait_event_interruptible_timeout(wq, condition, ret) \
do { \
    wait_queue_t __wait; \
    init_waitqueue_entry(&__wait, current); \
    \
    add_wait_queue(&wq, &__wait); \
    for (;;) { \
        set_current_state(TASK_INTERRUPTIBLE); \
        if (condition) \
            break; \
        if (!signal_pending(current)) { \
            ret = schedule_timeout(ret); \
            if (!ret) \
                break; \
            continue; \
        } \
        ret = -ERESTARTSYS; \
        break; \
    } \
    current->state = TASK_RUNNING; \
    remove_wait_queue(&wq, &__wait); \
} while (0)

#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
    long __ret = timeout; \
    if (!(condition)) \
        __wait_event_interruptible_timeout(wq, condition, __ret); \
    __ret; \
})

#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define DEV_PRIV(dev) (dev->priv)
#else
#define DEV_PRIV(dev) netdev_priv(dev)
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#define WL_ISR(i, d, p) wl_isr((i), (d))
#else
#define WL_ISR(i, d, p) wl_isr((i), (d), (p))
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#define netdev_priv(dev) dev->priv
#endif

#endif
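
For context, here is a minimal usage sketch (not part of linuxver.h) of how a driver thread routine is typically paired with the tsk_ctl_t / PROC_START() / PROC_STOP() helpers defined above. The names example_thread_func, example_wl_thread, wl, and thr_ctl are hypothetical; the handshake itself (complete() after DAEMONIZE(), a down()/terminated check in the loop, complete_and_exit() on the way out) is the protocol these macros assume.

/* Hypothetical sketch: a worker thread that follows the PROC_START()/PROC_STOP()
 * handshake from linuxver.h. Compiles against the era of kernels this header targets. */
static int example_thread_func(void *data)
{
    tsk_ctl_t *tsk = (tsk_ctl_t *)data;

    DAEMONIZE("example_wl_thread");   /* detach from user context; allow SIGKILL/SIGTERM */
    complete(&tsk->completed);        /* PROC_START() blocks on this completion */

    while (down_interruptible(&tsk->sema) == 0) {
        SMP_RD_BARRIER_DEPENDS();
        if (tsk->terminated)          /* set by PROC_STOP() before it ups the semaphore */
            break;
        /* ... service one queued event here ... */
    }

    complete_and_exit(&tsk->completed, 0);  /* unblocks PROC_STOP() and exits */
}

/* In the driver's attach/detach paths (wl and thr_ctl are illustrative):
 *     PROC_START(example_thread_func, wl, &wl->thr_ctl, 0);
 *     ...
 *     up(&wl->thr_ctl.sema);     wake the thread to do work
 *     ...
 *     PROC_STOP(&wl->thr_ctl);   request exit and wait for it
 */

When USE_KTHREAD_API is defined, PROC_START2() is the kthread_run()-based equivalent; the loop body of the thread routine stays the same.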