/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 by Waldorf Electronics
 * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007, 2014 Maciej W. Rozycki
 */
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/param.h>
#include <linux/smp.h>
#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/compiler.h>

/*
 * Constraint used for operand %1 of the LONG_SUBU in __delay() below:
 * normally a 16-bit signed immediate ("I"), but when the kernel is built
 * with CONFIG_CPU_DADDI_WORKAROUNDS the decrement must come from a
 * register ("r") instead, so the assembler never emits a daddi/daddiu
 * with an immediate operand.  (NOTE(review): presumably this targets the
 * known DADDI/DADDIU erratum on affected CPUs -- confirm against the
 * workaround's Kconfig help text.)
 */
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define GCC_DADDI_IMM_ASM() "I"
#else
#define GCC_DADDI_IMM_ASM() "r"
#endif

#ifndef CONFIG_HAVE_PLAT_DELAY

/*
 * __delay - busy-wait for @loops iterations of a two-instruction loop.
 *
 * The loop body is exactly one taken branch plus one subtract: the
 * LONG_SUBU (subu or dsubu depending on 32-/64-bit build, stringified
 * from asm.h) is placed in the branch delay slot of the bnez, which is
 * why ".set noreorder" brackets the sequence -- the assembler must not
 * move or fill the delay slot itself.  ".align 3" places label 1 on an
 * 8-byte boundary (NOTE(review): presumably so the branch and its delay
 * slot share a fetch group on affected cores -- TODO confirm).
 *
 * Operand %0 is both input ("0") and output ("=r"): the live countdown.
 * Operand %1 is the constant 1, constrained per GCC_DADDI_IMM_ASM()
 * above.
 */
void __delay(unsigned long loops)
{
	__asm__ __volatile__ (
	"	.set	noreorder				\n"
	"	.align	3					\n"
	"1:	bnez	%0, 1b					\n"
	"	" __stringify(LONG_SUBU) "	%0, %1		\n"
	"	.set	reorder					\n"
	: "=r" (loops)
	: GCC_DADDI_IMM_ASM() (1), "0" (loops));
}
EXPORT_SYMBOL(__delay);

/*
 * Division by multiplication: you don't have to worry about
 * loss of precision.
 *
 * Use only for very small delays ( < 1 msec). Should probably use a
 * lookup table, really, as the multiplications take much too long with
 * short delays. This is a "reasonable" implementation, though (and the
 * first constant multiplications gets optimized away if the delay is
 * a constant)
 */

/*
 * __udelay - busy-wait for @us microseconds.
 *
 * 0x000010c7 == 4295 == ceil(2^32 / 10^6), so
 * (us * 0x10c7 * HZ * lpj) >> 32  ~=  us * HZ * lpj / 10^6,
 * i.e. microseconds converted to __delay() loop iterations using the
 * calibrated loops-per-jiffy value (udelay_val) of the current CPU.
 * Rounding the constant up errs on the side of delaying slightly too
 * long rather than too short.
 */
void __udelay(unsigned long us)
{
	unsigned int lpj = raw_current_cpu_data.udelay_val;

	__delay((us * 0x000010c7ull * HZ * lpj) >> 32);
}
EXPORT_SYMBOL(__udelay);

/*
 * __ndelay - busy-wait for @ns nanoseconds.
 *
 * Same scheme as __udelay(): 0x00000005 == ceil(2^32 / 10^9), so the
 * 64-bit multiply followed by the >> 32 approximates
 * ns * HZ * lpj / 10^9 loop iterations, again rounded up.
 */
void __ndelay(unsigned long ns)
{
	unsigned int lpj = raw_current_cpu_data.udelay_val;

	__delay((ns * 0x00000005ull * HZ * lpj) >> 32);
}
EXPORT_SYMBOL(__ndelay);

#endif