OS-5192 need faster clock_gettime
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Joshua M. Clulow <jmc@joyent.com>
Reviewed by: Ryan Zezeski <ryan@zinascii.com>
    
      
    
          --- old/usr/src/uts/intel/ia32/ml/i86_subr.s
          +++ new/usr/src/uts/intel/ia32/ml/i86_subr.s
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
  24   24   * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
  25   25   * Copyright (c) 2014 by Delphix. All rights reserved.
       26 + * Copyright 2016 Joyent, Inc.
  26   27   */
  27   28  
  28   29  /*
  29   30   *  Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
  30   31   *  Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
  31   32   *    All Rights Reserved
  32   33   */
  33   34  
  34   35  /*
  35   36   * Copyright (c) 2009, Intel Corporation.
  36   37   * All rights reserved.
  37   38   */
  38   39  
  39   40  /*
  40   41   * General assembly language routines.
  41   42   * It is the intent of this file to contain routines that are
  42   43   * independent of the specific kernel architecture, and those that are
  43   44   * common across kernel architectures.
  44   45   * As architectures diverge, and implementations of specific
  45   46   * architecture-dependent routines change, the routines should be moved
  46   47   * from this file into the respective ../`arch -k`/subr.s file.
  47   48   */
  48   49  
  49   50  #include <sys/asm_linkage.h>
  50   51  #include <sys/asm_misc.h>
  51   52  #include <sys/panic.h>
  52   53  #include <sys/ontrap.h>
  53   54  #include <sys/regset.h>
  54   55  #include <sys/privregs.h>
  55   56  #include <sys/reboot.h>
  56   57  #include <sys/psw.h>
  57   58  #include <sys/x86_archext.h>
  58   59  
  59   60  #if defined(__lint)
  60   61  #include <sys/types.h>
  61   62  #include <sys/systm.h>
  62   63  #include <sys/thread.h>
  63   64  #include <sys/archsystm.h>
  64   65  #include <sys/byteorder.h>
  65   66  #include <sys/dtrace.h>
  66   67  #include <sys/ftrace.h>
  67   68  #else   /* __lint */
  68   69  #include "assym.h"
  69   70  #endif  /* __lint */
  70   71  #include <sys/dditypes.h>
  71   72  
  72   73  /*
  73   74   * on_fault()
  74   75   *
   75   76   * Catch lofault faults. Like setjmp, except it returns 1
   76   77   * if the code that follows causes an uncorrectable fault. Turned off
  77   78   * by calling no_fault(). Note that while under on_fault(),
  78   79   * SMAP is disabled. For more information see
  79   80   * uts/intel/ia32/ml/copy.s.
  80   81   */
  81   82  
  82   83  #if defined(__lint)
  83   84  
  84   85  /* ARGSUSED */
  85   86  int
  86   87  on_fault(label_t *ljb)
  87   88  { return (0); }
  88   89  
  89   90  void
  90   91  no_fault(void)
  91   92  {}
  92   93  
  93   94  #else   /* __lint */
  94   95  
  95   96  #if defined(__amd64)
  96   97  
  97   98          ENTRY(on_fault)
  98   99          movq    %gs:CPU_THREAD, %rsi
  99  100          leaq    catch_fault(%rip), %rdx
 100  101          movq    %rdi, T_ONFAULT(%rsi)           /* jumpbuf in t_onfault */
 101  102          movq    %rdx, T_LOFAULT(%rsi)           /* catch_fault in t_lofault */
 102  103          call    smap_disable                    /* allow user accesses */
 103  104          jmp     setjmp                          /* let setjmp do the rest */
 104  105  
 105  106  catch_fault:
 106  107          movq    %gs:CPU_THREAD, %rsi
 107  108          movq    T_ONFAULT(%rsi), %rdi           /* address of save area */
 108  109          xorl    %eax, %eax
 109  110          movq    %rax, T_ONFAULT(%rsi)           /* turn off onfault */
 110  111          movq    %rax, T_LOFAULT(%rsi)           /* turn off lofault */
 111  112          call    smap_enable                     /* disallow user accesses */
 112  113          jmp     longjmp                         /* let longjmp do the rest */
 113  114          SET_SIZE(on_fault)
 114  115  
 115  116          ENTRY(no_fault)
 116  117          movq    %gs:CPU_THREAD, %rsi
 117  118          xorl    %eax, %eax
 118  119          movq    %rax, T_ONFAULT(%rsi)           /* turn off onfault */
 119  120          movq    %rax, T_LOFAULT(%rsi)           /* turn off lofault */
 120  121          call    smap_enable                     /* disallow user accesses */
 121  122          ret
 122  123          SET_SIZE(no_fault)
 123  124  
 124  125  #elif defined(__i386)
 125  126  
 126  127          ENTRY(on_fault)
 127  128          movl    %gs:CPU_THREAD, %edx
 128  129          movl    4(%esp), %eax                   /* jumpbuf address */
 129  130          leal    catch_fault, %ecx
 130  131          movl    %eax, T_ONFAULT(%edx)           /* jumpbuf in t_onfault */
 131  132          movl    %ecx, T_LOFAULT(%edx)           /* catch_fault in t_lofault */
 132  133          jmp     setjmp                          /* let setjmp do the rest */
 133  134  
 134  135  catch_fault:
 135  136          movl    %gs:CPU_THREAD, %edx
 136  137          xorl    %eax, %eax
 137  138          movl    T_ONFAULT(%edx), %ecx           /* address of save area */
 138  139          movl    %eax, T_ONFAULT(%edx)           /* turn off onfault */
 139  140          movl    %eax, T_LOFAULT(%edx)           /* turn off lofault */
 140  141          pushl   %ecx
 141  142          call    longjmp                         /* let longjmp do the rest */
 142  143          SET_SIZE(on_fault)
 143  144  
 144  145          ENTRY(no_fault)
 145  146          movl    %gs:CPU_THREAD, %edx
 146  147          xorl    %eax, %eax
 147  148          movl    %eax, T_ONFAULT(%edx)           /* turn off onfault */
 148  149          movl    %eax, T_LOFAULT(%edx)           /* turn off lofault */
 149  150          ret
 150  151          SET_SIZE(no_fault)
 151  152  
 152  153  #endif  /* __i386 */
 153  154  #endif  /* __lint */
 154  155  
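
For reference, a minimal sketch of the usual on_fault()/no_fault() pattern
(the guarded-access shape used by the copy routines); peek_word() and its
arguments are illustrative names, not part of this change:

        /* Sketch: guard a single dereference that may fault. */
        int
        peek_word(ulong_t *addr, ulong_t *valp)
        {
                label_t ljb;

                if (on_fault(&ljb)) {
                        /* lofault fired; we longjmp'd back here with 1 */
                        no_fault();
                        return (-1);
                }
                *valp = *addr;          /* may fault; caught via t_lofault */
                no_fault();             /* disarm t_onfault/t_lofault */
                return (0);
        }
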
 155  156  /*
 156  157   * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
 157  158   * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
 158  159   */
 159  160  
 160  161  #if defined(lint)
 161  162  
 162  163  void
 163  164  on_trap_trampoline(void)
 164  165  {}
 165  166  
 166  167  #else   /* __lint */
 167  168  
 168  169  #if defined(__amd64)
 169  170  
 170  171          ENTRY(on_trap_trampoline)
 171  172          movq    %gs:CPU_THREAD, %rsi
 172  173          movq    T_ONTRAP(%rsi), %rdi
 173  174          addq    $OT_JMPBUF, %rdi
 174  175          jmp     longjmp
 175  176          SET_SIZE(on_trap_trampoline)
 176  177  
 177  178  #elif defined(__i386)
 178  179  
 179  180          ENTRY(on_trap_trampoline)
 180  181          movl    %gs:CPU_THREAD, %eax
 181  182          movl    T_ONTRAP(%eax), %eax
 182  183          addl    $OT_JMPBUF, %eax
 183  184          pushl   %eax
 184  185          call    longjmp
 185  186          SET_SIZE(on_trap_trampoline)
 186  187  
 187  188  #endif  /* __i386 */
 188  189  #endif  /* __lint */
 189  190  
 190  191  /*
 191  192   * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
 192  193   * more information about the on_trap() mechanism.  If the on_trap_data is the
 193  194   * same as the topmost stack element, we just modify that element.
 194  195   */
 195  196  #if defined(lint)
 196  197  
 197  198  /*ARGSUSED*/
 198  199  int
 199  200  on_trap(on_trap_data_t *otp, uint_t prot)
 200  201  { return (0); }
 201  202  
 202  203  #else   /* __lint */
 203  204  
 204  205  #if defined(__amd64)
 205  206  
 206  207          ENTRY(on_trap)
 207  208          movw    %si, OT_PROT(%rdi)              /* ot_prot = prot */
 208  209          movw    $0, OT_TRAP(%rdi)               /* ot_trap = 0 */
 209  210          leaq    on_trap_trampoline(%rip), %rdx  /* rdx = &on_trap_trampoline */
 210  211          movq    %rdx, OT_TRAMPOLINE(%rdi)       /* ot_trampoline = rdx */
 211  212          xorl    %ecx, %ecx
 212  213          movq    %rcx, OT_HANDLE(%rdi)           /* ot_handle = NULL */
 213  214          movq    %rcx, OT_PAD1(%rdi)             /* ot_pad1 = NULL */
 214  215          movq    %gs:CPU_THREAD, %rdx            /* rdx = curthread */
 215  216          movq    T_ONTRAP(%rdx), %rcx            /* rcx = curthread->t_ontrap */
 216  217          cmpq    %rdi, %rcx                      /* if (otp == %rcx)     */
 217  218          je      0f                              /*      don't modify t_ontrap */
 218  219  
 219  220          movq    %rcx, OT_PREV(%rdi)             /* ot_prev = t_ontrap */
 220  221          movq    %rdi, T_ONTRAP(%rdx)            /* curthread->t_ontrap = otp */
 221  222  
 222  223  0:      addq    $OT_JMPBUF, %rdi                /* &ot_jmpbuf */
 223  224          jmp     setjmp
 224  225          SET_SIZE(on_trap)
 225  226  
 226  227  #elif defined(__i386)
 227  228  
 228  229          ENTRY(on_trap)
 229  230          movl    4(%esp), %eax                   /* %eax = otp */
 230  231          movl    8(%esp), %edx                   /* %edx = prot */
 231  232  
 232  233          movw    %dx, OT_PROT(%eax)              /* ot_prot = prot */
 233  234          movw    $0, OT_TRAP(%eax)               /* ot_trap = 0 */
 234  235          leal    on_trap_trampoline, %edx        /* %edx = &on_trap_trampoline */
 235  236          movl    %edx, OT_TRAMPOLINE(%eax)       /* ot_trampoline = %edx */
 236  237          movl    $0, OT_HANDLE(%eax)             /* ot_handle = NULL */
 237  238          movl    $0, OT_PAD1(%eax)               /* ot_pad1 = NULL */
 238  239          movl    %gs:CPU_THREAD, %edx            /* %edx = curthread */
 239  240          movl    T_ONTRAP(%edx), %ecx            /* %ecx = curthread->t_ontrap */
 240  241          cmpl    %eax, %ecx                      /* if (otp == %ecx) */
 241  242          je      0f                              /*    don't modify t_ontrap */
 242  243  
 243  244          movl    %ecx, OT_PREV(%eax)             /* ot_prev = t_ontrap */
 244  245          movl    %eax, T_ONTRAP(%edx)            /* curthread->t_ontrap = otp */
 245  246  
 246  247  0:      addl    $OT_JMPBUF, %eax                /* %eax = &ot_jmpbuf */
 247  248          movl    %eax, 4(%esp)                   /* put %eax back on the stack */
 248  249          jmp     setjmp                          /* let setjmp do the rest */
 249  250          SET_SIZE(on_trap)
 250  251  
 251  252  #endif  /* __i386 */
 252  253  #endif  /* __lint */
 253  254  
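
A hedged sketch of how on_trap() is consumed, mirroring the stack-push
semantics described above (probe_reg() is an illustrative name; no_trap()
and OT_DATA_ACCESS are the <sys/ontrap.h> interfaces):

        /* Sketch: probe an address that may take a data-access trap. */
        int
        probe_reg(volatile uint32_t *reg, uint32_t *valp)
        {
                on_trap_data_t otd;

                if (on_trap(&otd, OT_DATA_ACCESS) != 0) {
                        /* the trampoline longjmp'd back here */
                        no_trap();
                        return (-1);
                }
                *valp = *reg;           /* may trap */
                no_trap();              /* pop our t_ontrap element */
                return (0);
        }
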
 254  255  /*
 255  256   * Setjmp and longjmp implement non-local gotos using state vectors
 256  257   * type label_t.
 257  258   */
 258  259  
 259  260  #if defined(__lint)
 260  261  
 261  262  /* ARGSUSED */
 262  263  int
 263  264  setjmp(label_t *lp)
 264  265  { return (0); }
 265  266  
 266  267  /* ARGSUSED */
 267  268  void
 268  269  longjmp(label_t *lp)
 269  270  {}
 270  271  
 271  272  #else   /* __lint */
 272  273  
 273  274  #if LABEL_PC != 0
 274  275  #error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
 275  276  #endif  /* LABEL_PC != 0 */
 276  277  
 277  278  #if defined(__amd64)
 278  279  
 279  280          ENTRY(setjmp)
 280  281          movq    %rsp, LABEL_SP(%rdi)
 281  282          movq    %rbp, LABEL_RBP(%rdi)
 282  283          movq    %rbx, LABEL_RBX(%rdi)
 283  284          movq    %r12, LABEL_R12(%rdi)
 284  285          movq    %r13, LABEL_R13(%rdi)
 285  286          movq    %r14, LABEL_R14(%rdi)
 286  287          movq    %r15, LABEL_R15(%rdi)
 287  288          movq    (%rsp), %rdx            /* return address */
 288  289          movq    %rdx, (%rdi)            /* LABEL_PC is 0 */
 289  290          xorl    %eax, %eax              /* return 0 */
 290  291          ret
 291  292          SET_SIZE(setjmp)
 292  293  
 293  294          ENTRY(longjmp)
 294  295          movq    LABEL_SP(%rdi), %rsp
 295  296          movq    LABEL_RBP(%rdi), %rbp
 296  297          movq    LABEL_RBX(%rdi), %rbx
 297  298          movq    LABEL_R12(%rdi), %r12
 298  299          movq    LABEL_R13(%rdi), %r13
 299  300          movq    LABEL_R14(%rdi), %r14
 300  301          movq    LABEL_R15(%rdi), %r15
 301  302          movq    (%rdi), %rdx            /* return address; LABEL_PC is 0 */
 302  303          movq    %rdx, (%rsp)
 303  304          xorl    %eax, %eax
 304  305          incl    %eax                    /* return 1 */
 305  306          ret
 306  307          SET_SIZE(longjmp)
 307  308  
 308  309  #elif defined(__i386)
 309  310  
 310  311          ENTRY(setjmp)
 311  312          movl    4(%esp), %edx           /* address of save area */
 312  313          movl    %ebp, LABEL_EBP(%edx)
 313  314          movl    %ebx, LABEL_EBX(%edx)
 314  315          movl    %esi, LABEL_ESI(%edx)
 315  316          movl    %edi, LABEL_EDI(%edx)
 316  317          movl    %esp, 4(%edx)
 317  318          movl    (%esp), %ecx            /* %eip (return address) */
 318  319          movl    %ecx, (%edx)            /* LABEL_PC is 0 */
 319  320          subl    %eax, %eax              /* return 0 */
 320  321          ret
 321  322          SET_SIZE(setjmp)
 322  323  
 323  324          ENTRY(longjmp)
 324  325          movl    4(%esp), %edx           /* address of save area */
 325  326          movl    LABEL_EBP(%edx), %ebp
 326  327          movl    LABEL_EBX(%edx), %ebx
 327  328          movl    LABEL_ESI(%edx), %esi
 328  329          movl    LABEL_EDI(%edx), %edi
 329  330          movl    4(%edx), %esp
 330  331          movl    (%edx), %ecx            /* %eip (return addr); LABEL_PC is 0 */
 331  332          movl    $1, %eax
 332  333          addl    $4, %esp                /* pop ret adr */
 333  334          jmp     *%ecx                   /* indirect */
 334  335          SET_SIZE(longjmp)
 335  336  
 336  337  #endif  /* __i386 */
 337  338  #endif  /* __lint */
 338  339  
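
In C terms, these kernel-private routines implement a one-shot non-local
goto over a label_t; note that this longjmp() takes no value argument and
always makes setjmp() return 1, unlike the libc versions. A sketch
(risky_work() and recover() are placeholders):

        void
        example(void)
        {
                label_t jb;

                if (setjmp(&jb) == 0) {
                        /* first return: callee-saved regs, SP, PC in jb */
                        risky_work(&jb);        /* may call longjmp(&jb) */
                } else {
                        /* longjmp(&jb) resumed us; setjmp() returned 1 */
                        recover();
                }
        }
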
 339  340  /*
 340  341   * if a() calls b() calls caller(),
 341  342   * caller() returns return address in a().
 342  343   * (Note: We assume a() and b() are C routines which do the normal entry/exit
 343  344   *  sequence.)
 344  345   */
 345  346  
 346  347  #if defined(__lint)
 347  348  
 348  349  caddr_t
 349  350  caller(void)
 350  351  { return (0); }
 351  352  
 352  353  #else   /* __lint */
 353  354  
 354  355  #if defined(__amd64)
 355  356  
 356  357          ENTRY(caller)
 357  358          movq    8(%rbp), %rax           /* b()'s return pc, in a() */
 358  359          ret
 359  360          SET_SIZE(caller)
 360  361  
 361  362  #elif defined(__i386)
 362  363  
 363  364          ENTRY(caller)
 364  365          movl    4(%ebp), %eax           /* b()'s return pc, in a() */
 365  366          ret
 366  367          SET_SIZE(caller)
 367  368  
 368  369  #endif  /* __i386 */
 369  370  #endif  /* __lint */
 370  371  
 371  372  /*
 372  373   * if a() calls callee(), callee() returns the
 373  374   * return address in a();
 374  375   */
 375  376  
 376  377  #if defined(__lint)
 377  378  
 378  379  caddr_t
 379  380  callee(void)
 380  381  { return (0); }
 381  382  
 382  383  #else   /* __lint */
 383  384  
 384  385  #if defined(__amd64)
 385  386  
 386  387          ENTRY(callee)
 387  388          movq    (%rsp), %rax            /* callee()'s return pc, in a() */
 388  389          ret
 389  390          SET_SIZE(callee)
 390  391  
 391  392  #elif defined(__i386)
 392  393  
 393  394          ENTRY(callee)
 394  395          movl    (%esp), %eax            /* callee()'s return pc, in a() */
 395  396          ret
 396  397          SET_SIZE(callee)
 397  398  
 398  399  #endif  /* __i386 */
 399  400  #endif  /* __lint */
 400  401  
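
For intuition only, rough C equivalents of caller() and callee() using
compiler builtins (not how the kernel builds them; the assembly reads the
saved return addresses directly off the stack and frame):

        caddr_t
        callee(void)    /* our own return address: the pc in a() */
        {
                return ((caddr_t)__builtin_return_address(0));
        }

        caddr_t
        caller(void)    /* our caller b()'s return address: the pc in a() */
        {
                return ((caddr_t)__builtin_return_address(1));
        }
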
 401  402  /*
 402  403   * return the current frame pointer
 403  404   */
 404  405  
 405  406  #if defined(__lint)
 406  407  
 407  408  greg_t
 408  409  getfp(void)
 409  410  { return (0); }
 410  411  
 411  412  #else   /* __lint */
 412  413  
 413  414  #if defined(__amd64)
 414  415  
 415  416          ENTRY(getfp)
 416  417          movq    %rbp, %rax
 417  418          ret
 418  419          SET_SIZE(getfp)
 419  420  
 420  421  #elif defined(__i386)
 421  422  
 422  423          ENTRY(getfp)
 423  424          movl    %ebp, %eax
 424  425          ret
 425  426          SET_SIZE(getfp)
 426  427  
 427  428  #endif  /* __i386 */
 428  429  #endif  /* __lint */
 429  430  
 430  431  /*
 431  432   * Invalidate a single page table entry in the TLB
 432  433   */
 433  434  
 434  435  #if defined(__lint)
 435  436  
 436  437  /* ARGSUSED */
 437  438  void
 438  439  mmu_tlbflush_entry(caddr_t m)
 439  440  {}
 440  441  
 441  442  #else   /* __lint */
 442  443  
 443  444  #if defined(__amd64)
 444  445  
 445  446          ENTRY(mmu_tlbflush_entry)
 446  447          invlpg  (%rdi)
 447  448          ret
 448  449          SET_SIZE(mmu_tlbflush_entry)
 449  450  
 450  451  #elif defined(__i386)
 451  452  
 452  453          ENTRY(mmu_tlbflush_entry)
 453  454          movl    4(%esp), %eax
 454  455          invlpg  (%eax)
 455  456          ret
 456  457          SET_SIZE(mmu_tlbflush_entry)
 457  458  
 458  459  #endif  /* __i386 */
 459  460  #endif  /* __lint */
 460  461  
 461  462  
 462  463  /*
 463  464   * Get/Set the value of various control registers
 464  465   */
 465  466  
 466  467  #if defined(__lint)
 467  468  
 468  469  ulong_t
 469  470  getcr0(void)
 470  471  { return (0); }
 471  472  
 472  473  /* ARGSUSED */
 473  474  void
 474  475  setcr0(ulong_t value)
 475  476  {}
 476  477  
 477  478  ulong_t
 478  479  getcr2(void)
 479  480  { return (0); }
 480  481  
 481  482  ulong_t
 482  483  getcr3(void)
 483  484  { return (0); }
 484  485  
 485  486  #if !defined(__xpv)
 486  487  /* ARGSUSED */
 487  488  void
 488  489  setcr3(ulong_t val)
 489  490  {}
 490  491  
 491  492  void
 492  493  reload_cr3(void)
 493  494  {}
 494  495  #endif
 495  496  
 496  497  ulong_t
 497  498  getcr4(void)
 498  499  { return (0); }
 499  500  
 500  501  /* ARGSUSED */
 501  502  void
 502  503  setcr4(ulong_t val)
 503  504  {}
 504  505  
 505  506  #if defined(__amd64)
 506  507  
 507  508  ulong_t
 508  509  getcr8(void)
 509  510  { return (0); }
 510  511  
 511  512  /* ARGSUSED */
 512  513  void
 513  514  setcr8(ulong_t val)
 514  515  {}
 515  516  
 516  517  #endif  /* __amd64 */
 517  518  
 518  519  #else   /* __lint */
 519  520  
 520  521  #if defined(__amd64)
 521  522  
 522  523          ENTRY(getcr0)
 523  524          movq    %cr0, %rax
 524  525          ret
 525  526          SET_SIZE(getcr0)
 526  527  
 527  528          ENTRY(setcr0)
 528  529          movq    %rdi, %cr0
 529  530          ret
 530  531          SET_SIZE(setcr0)
 531  532  
 532  533          ENTRY(getcr2)
 533  534  #if defined(__xpv)
 534  535          movq    %gs:CPU_VCPU_INFO, %rax
 535  536          movq    VCPU_INFO_ARCH_CR2(%rax), %rax
 536  537  #else
 537  538          movq    %cr2, %rax
 538  539  #endif
 539  540          ret
 540  541          SET_SIZE(getcr2)
 541  542  
 542  543          ENTRY(getcr3)
 543  544          movq    %cr3, %rax
 544  545          ret
 545  546          SET_SIZE(getcr3)
 546  547  
 547  548  #if !defined(__xpv)
 548  549  
 549  550          ENTRY(setcr3)
 550  551          movq    %rdi, %cr3
 551  552          ret
 552  553          SET_SIZE(setcr3)
 553  554  
 554  555          ENTRY(reload_cr3)
 555  556          movq    %cr3, %rdi
 556  557          movq    %rdi, %cr3
 557  558          ret
 558  559          SET_SIZE(reload_cr3)
 559  560  
 560  561  #endif  /* __xpv */
 561  562  
 562  563          ENTRY(getcr4)
 563  564          movq    %cr4, %rax
 564  565          ret
 565  566          SET_SIZE(getcr4)
 566  567  
 567  568          ENTRY(setcr4)
 568  569          movq    %rdi, %cr4
 569  570          ret
 570  571          SET_SIZE(setcr4)
 571  572  
 572  573          ENTRY(getcr8)
 573  574          movq    %cr8, %rax
 574  575          ret
 575  576          SET_SIZE(getcr8)
 576  577  
 577  578          ENTRY(setcr8)
 578  579          movq    %rdi, %cr8
 579  580          ret
 580  581          SET_SIZE(setcr8)
 581  582  
 582  583  #elif defined(__i386)
 583  584  
 584  585          ENTRY(getcr0)
 585  586          movl    %cr0, %eax
 586  587          ret
 587  588          SET_SIZE(getcr0)
 588  589  
 589  590          ENTRY(setcr0)
 590  591          movl    4(%esp), %eax
 591  592          movl    %eax, %cr0
 592  593          ret
 593  594          SET_SIZE(setcr0)
 594  595  
 595  596          /*
 596  597           * "lock mov %cr0" is used on processors which indicate it is
 597  598           * supported via CPUID. Normally the 32 bit TPR is accessed via
 598  599           * the local APIC.
 599  600           */
 600  601          ENTRY(getcr8)
 601  602          lock
 602  603          movl    %cr0, %eax
 603  604          ret
 604  605          SET_SIZE(getcr8)
 605  606  
 606  607          ENTRY(setcr8)
 607  608          movl    4(%esp), %eax
 608  609          lock
 609  610          movl    %eax, %cr0
 610  611          ret
 611  612          SET_SIZE(setcr8)
 612  613  
 613  614          ENTRY(getcr2)
 614  615  #if defined(__xpv)
 615  616          movl    %gs:CPU_VCPU_INFO, %eax
 616  617          movl    VCPU_INFO_ARCH_CR2(%eax), %eax
 617  618  #else
 618  619          movl    %cr2, %eax
 619  620  #endif
 620  621          ret
 621  622          SET_SIZE(getcr2)
 622  623  
 623  624          ENTRY(getcr3)
 624  625          movl    %cr3, %eax
 625  626          ret
 626  627          SET_SIZE(getcr3)
 627  628  
 628  629  #if !defined(__xpv)
 629  630  
 630  631          ENTRY(setcr3)
 631  632          movl    4(%esp), %eax
 632  633          movl    %eax, %cr3
 633  634          ret
 634  635          SET_SIZE(setcr3)
 635  636  
 636  637          ENTRY(reload_cr3)
 637  638          movl    %cr3, %eax
 638  639          movl    %eax, %cr3
 639  640          ret
 640  641          SET_SIZE(reload_cr3)
 641  642  
 642  643  #endif  /* __xpv */
 643  644  
 644  645          ENTRY(getcr4)
 645  646          movl    %cr4, %eax
 646  647          ret
 647  648          SET_SIZE(getcr4)
 648  649  
 649  650          ENTRY(setcr4)
 650  651          movl    4(%esp), %eax
 651  652          movl    %eax, %cr4
 652  653          ret
 653  654          SET_SIZE(setcr4)
 654  655  
 655  656  #endif  /* __i386 */
 656  657  #endif  /* __lint */
 657  658  
 658  659  #if defined(__lint)
 659  660  
 660  661  /*ARGSUSED*/
 661  662  uint32_t
 662  663  __cpuid_insn(struct cpuid_regs *regs)
 663  664  { return (0); }
 664  665  
 665  666  #else   /* __lint */
 666  667  
 667  668  #if defined(__amd64)
 668  669  
 669  670          ENTRY(__cpuid_insn)
 670  671          movq    %rbx, %r8
 671  672          movq    %rcx, %r9
 672  673          movq    %rdx, %r11
 673  674          movl    (%rdi), %eax            /* %eax = regs->cp_eax */
 674  675          movl    0x4(%rdi), %ebx         /* %ebx = regs->cp_ebx */
 675  676          movl    0x8(%rdi), %ecx         /* %ecx = regs->cp_ecx */
 676  677          movl    0xc(%rdi), %edx         /* %edx = regs->cp_edx */
 677  678          cpuid
 678  679          movl    %eax, (%rdi)            /* regs->cp_eax = %eax */
 679  680          movl    %ebx, 0x4(%rdi)         /* regs->cp_ebx = %ebx */
 680  681          movl    %ecx, 0x8(%rdi)         /* regs->cp_ecx = %ecx */
 681  682          movl    %edx, 0xc(%rdi)         /* regs->cp_edx = %edx */
 682  683          movq    %r8, %rbx
 683  684          movq    %r9, %rcx
 684  685          movq    %r11, %rdx
 685  686          ret
 686  687          SET_SIZE(__cpuid_insn)
 687  688  
 688  689  #elif defined(__i386)
 689  690  
 690  691          ENTRY(__cpuid_insn)
 691  692          pushl   %ebp
 692  693          movl    0x8(%esp), %ebp         /* %ebp = regs */
 693  694          pushl   %ebx
 694  695          pushl   %ecx
 695  696          pushl   %edx
 696  697          movl    (%ebp), %eax            /* %eax = regs->cp_eax */
 697  698          movl    0x4(%ebp), %ebx         /* %ebx = regs->cp_ebx */
 698  699          movl    0x8(%ebp), %ecx         /* %ecx = regs->cp_ecx */
 699  700          movl    0xc(%ebp), %edx         /* %edx = regs->cp_edx */
 700  701          cpuid
 701  702          movl    %eax, (%ebp)            /* regs->cp_eax = %eax */
 702  703          movl    %ebx, 0x4(%ebp)         /* regs->cp_ebx = %ebx */
 703  704          movl    %ecx, 0x8(%ebp)         /* regs->cp_ecx = %ecx */
 704  705          movl    %edx, 0xc(%ebp)         /* regs->cp_edx = %edx */
 705  706          popl    %edx
 706  707          popl    %ecx
 707  708          popl    %ebx
 708  709          popl    %ebp
 709  710          ret
 710  711          SET_SIZE(__cpuid_insn)
 711  712  
 712  713  #endif  /* __i386 */
 713  714  #endif  /* __lint */
 714  715  
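
A short usage sketch for __cpuid_insn(); the struct cpuid_regs layout
(cp_eax at offset 0 through cp_edx at 0xc) is exactly what the offsets
above assume. Reading the vendor string from leaf 0:

        struct cpuid_regs cp;
        char vendor[13];

        cp.cp_eax = 0;                          /* leaf 0: vendor id */
        (void) __cpuid_insn(&cp);               /* returns cp.cp_eax */
        bcopy(&cp.cp_ebx, &vendor[0], 4);       /* vendor string is in */
        bcopy(&cp.cp_edx, &vendor[4], 4);       /* ebx, edx, ecx order */
        bcopy(&cp.cp_ecx, &vendor[8], 4);
        vendor[12] = '\0';
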
 715  716  #if defined(__lint)
 716  717  
 717  718  /*ARGSUSED*/
 718  719  void
 719  720  i86_monitor(volatile uint32_t *addr, uint32_t extensions, uint32_t hints)
 720  721  {}
 721  722  
 722  723  #else   /* __lint */
 723  724  
 724  725  #if defined(__amd64)
 725  726  
 726  727          ENTRY_NP(i86_monitor)
 727  728          pushq   %rbp
 728  729          movq    %rsp, %rbp
 729  730          movq    %rdi, %rax              /* addr */
 730  731          movq    %rsi, %rcx              /* extensions */
 731  732          /* rdx contains input arg3: hints */
 732  733          clflush (%rax)
 733  734          .byte   0x0f, 0x01, 0xc8        /* monitor */
 734  735          leave
 735  736          ret
 736  737          SET_SIZE(i86_monitor)
 737  738  
 738  739  #elif defined(__i386)
 739  740  
 740  741  ENTRY_NP(i86_monitor)
 741  742          pushl   %ebp
 742  743          movl    %esp, %ebp
 743  744          movl    0x8(%ebp),%eax          /* addr */
 744  745          movl    0xc(%ebp),%ecx          /* extensions */
 745  746          movl    0x10(%ebp),%edx         /* hints */
 746  747          clflush (%eax)
 747  748          .byte   0x0f, 0x01, 0xc8        /* monitor */
 748  749          leave
 749  750          ret
 750  751          SET_SIZE(i86_monitor)
 751  752  
 752  753  #endif  /* __i386 */
 753  754  #endif  /* __lint */
 754  755  
 755  756  #if defined(__lint)
 756  757  
 757  758  /*ARGSUSED*/
 758  759  void
 759  760  i86_mwait(uint32_t data, uint32_t extensions)
 760  761  {}
 761  762  
 762  763  #else   /* __lint */
 763  764  
 764  765  #if defined(__amd64)
 765  766  
 766  767          ENTRY_NP(i86_mwait)
 767  768          pushq   %rbp
 768  769          movq    %rsp, %rbp
 769  770          movq    %rdi, %rax              /* data */
 770  771          movq    %rsi, %rcx              /* extensions */
 771  772          .byte   0x0f, 0x01, 0xc9        /* mwait */
 772  773          leave
 773  774          ret
 774  775          SET_SIZE(i86_mwait)
 775  776  
 776  777  #elif defined(__i386)
 777  778  
 778  779          ENTRY_NP(i86_mwait)
 779  780          pushl   %ebp
 780  781          movl    %esp, %ebp
 781  782          movl    0x8(%ebp),%eax          /* data */
 782  783          movl    0xc(%ebp),%ecx          /* extensions */
 783  784          .byte   0x0f, 0x01, 0xc9        /* mwait */
 784  785          leave
 785  786          ret
 786  787          SET_SIZE(i86_mwait)
 787  788  
 788  789  #endif  /* __i386 */
 789  790  #endif  /* __lint */
 790  791  
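
These two wrap the MONITOR/MWAIT instruction pair (emitted as .byte
sequences for older assemblers). A sketch of the canonical idle-wait shape
built on them (names illustrative; another CPU wakes the sleeper by
storing to *wakeup):

        static void
        mwait_idle(volatile uint32_t *wakeup)
        {
                i86_monitor(wakeup, 0, 0);      /* arm monitor on the line */
                if (*wakeup == 0)               /* re-check before sleeping */
                        i86_mwait(0, 0);        /* wake on store or intr */
        }
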
 791  792  #if defined(__xpv)
 792  793          /*
 793  794           * Defined in C
 794  795           */
 795  796  #else
 796  797  
 797  798  #if defined(__lint)
 798  799  
 799  800  hrtime_t
 800  801  tsc_read(void)
 801  802  {
 802  803          return (0);
 803  804  }
 804  805  
 805  806  #else   /* __lint */
 806  807  
 807  808  #if defined(__amd64)
 808  809  
 809  810          ENTRY_NP(tsc_read)
 810  811          movq    %rbx, %r11
 811  812          movl    $0, %eax
 812  813          cpuid
 813  814          rdtsc
 814  815          movq    %r11, %rbx
 815  816          shlq    $32, %rdx
 816  817          orq     %rdx, %rax
 817  818          ret
 818  819          .globl _tsc_mfence_start
 819  820  _tsc_mfence_start:
 820  821          mfence
 821  822          rdtsc
 822  823          shlq    $32, %rdx
 823  824          orq     %rdx, %rax
 824  825          ret
 825  826          .globl _tsc_mfence_end
 826  827  _tsc_mfence_end:
 827  828          .globl _tscp_start
 828  829  _tscp_start:
 829  830          .byte   0x0f, 0x01, 0xf9        /* rdtscp instruction */
 830  831          shlq    $32, %rdx
 831  832          orq     %rdx, %rax
 832  833          ret
 833  834          .globl _tscp_end
 834  835  _tscp_end:
 835  836          .globl _no_rdtsc_start
 836  837  _no_rdtsc_start:
 837  838          xorl    %edx, %edx
 838  839          xorl    %eax, %eax
 839  840          ret
 840  841          .globl _no_rdtsc_end
 841  842  _no_rdtsc_end:
 842  843          .globl _tsc_lfence_start
 843  844  _tsc_lfence_start:
 844  845          lfence
 845  846          rdtsc
 846  847          shlq    $32, %rdx
 847  848          orq     %rdx, %rax
 848  849          ret
 849  850          .globl _tsc_lfence_end
 850  851  _tsc_lfence_end:
 851  852          SET_SIZE(tsc_read)
 852  853  
 853  854  #else /* __i386 */
 854  855  
 855  856          ENTRY_NP(tsc_read)
 856  857          pushl   %ebx
 857  858          movl    $0, %eax
 858  859          cpuid
 859  860          rdtsc
 860  861          popl    %ebx
 861  862          ret
 862  863          .globl _tsc_mfence_start
 863  864  _tsc_mfence_start:
 864  865          mfence
 865  866          rdtsc
 866  867          ret
 867  868          .globl _tsc_mfence_end
 868  869  _tsc_mfence_end:
 869  870          .globl  _tscp_start
 870  871  _tscp_start:
 871  872          .byte   0x0f, 0x01, 0xf9        /* rdtscp instruction */
 872  873          ret
 873  874          .globl _tscp_end
 874  875  _tscp_end:
 875  876          .globl _no_rdtsc_start
 876  877  _no_rdtsc_start:
 877  878          xorl    %edx, %edx
 878  879          xorl    %eax, %eax
 879  880          ret
 880  881          .globl _no_rdtsc_end
 881  882  _no_rdtsc_end:
 882  883          .globl _tsc_lfence_start
 883  884  _tsc_lfence_start:
 884  885          lfence
 885  886          rdtsc
 886  887          ret
 887  888          .globl _tsc_lfence_end
 888  889  _tsc_lfence_end:
 889  890          SET_SIZE(tsc_read)
 890  891  
 891  892  #endif  /* __i386 */
 892  893  
 893  894  #endif  /* __lint */
 894  895  
 895  896  
 896  897  #endif  /* __xpv */
 897  898  
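
The _tsc_mfence_start/_end, _tscp_start/_end, _no_rdtsc_start/_end and
_tsc_lfence_start/_end label pairs delimit alternative bodies that boot
code copies over the default tsc_read() once CPU features are known
(serializing with CPUID is the conservative default; RDTSCP or a fence is
cheaper where supported). A sketch of that selection, assuming kernel text
is still writable this early; the function name is illustrative:

        extern hrtime_t tsc_read(void);
        extern char _tsc_mfence_start[], _tsc_mfence_end[];
        extern char _no_rdtsc_start[], _no_rdtsc_end[];

        static void
        select_tsc_variant(boolean_t have_tsc)
        {
                if (have_tsc) {
                        bcopy(_tsc_mfence_start, (void *)tsc_read,
                            (size_t)(_tsc_mfence_end - _tsc_mfence_start));
                } else {
                        bcopy(_no_rdtsc_start, (void *)tsc_read,
                            (size_t)(_no_rdtsc_end - _no_rdtsc_start));
                }
        }
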
 898  899  #ifdef __lint
 899  900  /*
 900  901   * Do not use this function for obtaining the clock tick.  It
 901  902   * is called by callers who do not need a guaranteed
 902  903   * correct tick value.  The proper routine to use is tsc_read().
 903  904   */
 904  905  u_longlong_t
 905  906  randtick(void)
 906  907  {
 907  908          return (0);
 908  909  }
 909  910  #else
 910  911  #if defined(__amd64)
 911  912          ENTRY_NP(randtick)
 912  913          rdtsc
 913  914          shlq    $32, %rdx
 914  915          orq     %rdx, %rax
 915  916          ret
 916  917          SET_SIZE(randtick)
 917  918  #else
 918  919          ENTRY_NP(randtick)
 919  920          rdtsc
 920  921          ret
 921  922          SET_SIZE(randtick)
 922  923  #endif /* __i386 */
 923  924  #endif /* __lint */
 924  925  /*
 925  926   * Insert entryp after predp in a doubly linked list.
 926  927   */
 927  928  
 928  929  #if defined(__lint)
 929  930  
 930  931  /*ARGSUSED*/
 931  932  void
 932  933  _insque(caddr_t entryp, caddr_t predp)
 933  934  {}
 934  935  
 935  936  #else   /* __lint */
 936  937  
 937  938  #if defined(__amd64)
 938  939  
 939  940          ENTRY(_insque)
 940  941          movq    (%rsi), %rax            /* predp->forw                  */
 941  942          movq    %rsi, CPTRSIZE(%rdi)    /* entryp->back = predp         */
 942  943          movq    %rax, (%rdi)            /* entryp->forw = predp->forw   */
 943  944          movq    %rdi, (%rsi)            /* predp->forw = entryp         */
 944  945          movq    %rdi, CPTRSIZE(%rax)    /* predp->forw->back = entryp   */
 945  946          ret
 946  947          SET_SIZE(_insque)
 947  948  
 948  949  #elif defined(__i386)
 949  950  
 950  951          ENTRY(_insque)
 951  952          movl    8(%esp), %edx
 952  953          movl    4(%esp), %ecx
 953  954          movl    (%edx), %eax            /* predp->forw                  */
 954  955          movl    %edx, CPTRSIZE(%ecx)    /* entryp->back = predp         */
 955  956          movl    %eax, (%ecx)            /* entryp->forw = predp->forw   */
 956  957          movl    %ecx, (%edx)            /* predp->forw = entryp         */
 957  958          movl    %ecx, CPTRSIZE(%eax)    /* predp->forw->back = entryp   */
 958  959          ret
 959  960          SET_SIZE(_insque)
 960  961  
 961  962  #endif  /* __i386 */
 962  963  #endif  /* __lint */
 963  964  
 964  965  /*
 965  966   * Remove entryp from a doubly linked list
 966  967   */
 967  968  
 968  969  #if defined(__lint)
 969  970  
 970  971  /*ARGSUSED*/
 971  972  void
 972  973  _remque(caddr_t entryp)
 973  974  {}
 974  975  
 975  976  #else   /* __lint */
 976  977  
 977  978  #if defined(__amd64)
 978  979  
 979  980          ENTRY(_remque)
 980  981          movq    (%rdi), %rax            /* entry->forw */
 981  982          movq    CPTRSIZE(%rdi), %rdx    /* entry->back */
 982  983          movq    %rax, (%rdx)            /* entry->back->forw = entry->forw */
 983  984          movq    %rdx, CPTRSIZE(%rax)    /* entry->forw->back = entry->back */
 984  985          ret
 985  986          SET_SIZE(_remque)
 986  987  
 987  988  #elif defined(__i386)
 988  989  
 989  990          ENTRY(_remque)
 990  991          movl    4(%esp), %ecx
 991  992          movl    (%ecx), %eax            /* entry->forw */
 992  993          movl    CPTRSIZE(%ecx), %edx    /* entry->back */
 993  994          movl    %eax, (%edx)            /* entry->back->forw = entry->forw */
 994  995          movl    %edx, CPTRSIZE(%eax)    /* entry->forw->back = entry->back */
 995  996          ret
 996  997          SET_SIZE(_remque)
 997  998  
 998  999  #endif  /* __i386 */
 999 1000  #endif  /* __lint */
1000 1001  
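
The C shape of these two primitives, for reference (the assembly treats
the caddr_t arguments as pointers to a { forw, back } pair, with back at
offset CPTRSIZE; the _c suffix here just avoids the real symbol names):

        typedef struct qelem {
                struct qelem    *q_forw;
                struct qelem    *q_back;
        } qelem_t;

        void
        insque_c(qelem_t *entryp, qelem_t *predp)
        {
                entryp->q_forw = predp->q_forw;  /* entry->forw = pred->forw */
                entryp->q_back = predp;          /* entry->back = pred */
                predp->q_forw->q_back = entryp;  /* old forw's back = entry */
                predp->q_forw = entryp;          /* pred->forw = entry */
        }

        void
        remque_c(qelem_t *entryp)
        {
                entryp->q_back->q_forw = entryp->q_forw;
                entryp->q_forw->q_back = entryp->q_back;
        }
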
1001 1002  /*
1002 1003   * Returns the number of
 1003 1004   * non-null bytes in the string argument.
1004 1005   */
1005 1006  
1006 1007  #if defined(__lint)
1007 1008  
1008 1009  /* ARGSUSED */
1009 1010  size_t
1010 1011  strlen(const char *str)
1011 1012  { return (0); }
1012 1013  
1013 1014  #else   /* __lint */
1014 1015  
1015 1016  #if defined(__amd64)
1016 1017  
1017 1018  /*
1018 1019   * This is close to a simple transliteration of a C version of this
1019 1020   * routine.  We should either just -make- this be a C version, or
1020 1021   * justify having it in assembler by making it significantly faster.
1021 1022   *
1022 1023   * size_t
1023 1024   * strlen(const char *s)
1024 1025   * {
1025 1026   *      const char *s0;
1026 1027   * #if defined(DEBUG)
1027 1028   *      if ((uintptr_t)s < KERNELBASE)
1028 1029   *              panic(.str_panic_msg);
1029 1030   * #endif
1030 1031   *      for (s0 = s; *s; s++)
1031 1032   *              ;
1032 1033   *      return (s - s0);
1033 1034   * }
1034 1035   */
1035 1036  
1036 1037          ENTRY(strlen)
1037 1038  #ifdef DEBUG
1038 1039          movq    postbootkernelbase(%rip), %rax
1039 1040          cmpq    %rax, %rdi
1040 1041          jae     str_valid
1041 1042          pushq   %rbp
1042 1043          movq    %rsp, %rbp
1043 1044          leaq    .str_panic_msg(%rip), %rdi
1044 1045          xorl    %eax, %eax
1045 1046          call    panic
1046 1047  #endif  /* DEBUG */
1047 1048  str_valid:
1048 1049          cmpb    $0, (%rdi)
1049 1050          movq    %rdi, %rax
1050 1051          je      .null_found
1051 1052          .align  4
1052 1053  .strlen_loop:
1053 1054          incq    %rdi
1054 1055          cmpb    $0, (%rdi)
1055 1056          jne     .strlen_loop
1056 1057  .null_found:
1057 1058          subq    %rax, %rdi
1058 1059          movq    %rdi, %rax
1059 1060          ret
1060 1061          SET_SIZE(strlen)
1061 1062  
1062 1063  #elif defined(__i386)
1063 1064  
1064 1065          ENTRY(strlen)
1065 1066  #ifdef DEBUG
1066 1067          movl    postbootkernelbase, %eax
1067 1068          cmpl    %eax, 4(%esp)
1068 1069          jae     str_valid
1069 1070          pushl   %ebp
1070 1071          movl    %esp, %ebp
1071 1072          pushl   $.str_panic_msg
1072 1073          call    panic
1073 1074  #endif /* DEBUG */
1074 1075  
1075 1076  str_valid:
1076 1077          movl    4(%esp), %eax           /* %eax = string address */
1077 1078          testl   $3, %eax                /* if %eax not word aligned */
1078 1079          jnz     .not_word_aligned       /* goto .not_word_aligned */
1079 1080          .align  4
1080 1081  .word_aligned:
1081 1082          movl    (%eax), %edx            /* move 1 word from (%eax) to %edx */
1082 1083          movl    $0x7f7f7f7f, %ecx
1083 1084          andl    %edx, %ecx              /* %ecx = %edx & 0x7f7f7f7f */
1084 1085          addl    $4, %eax                /* next word */
1085 1086          addl    $0x7f7f7f7f, %ecx       /* %ecx += 0x7f7f7f7f */
1086 1087          orl     %edx, %ecx              /* %ecx |= %edx */
1087 1088          andl    $0x80808080, %ecx       /* %ecx &= 0x80808080 */
1088 1089          cmpl    $0x80808080, %ecx       /* if no null byte in this word */
1089 1090          je      .word_aligned           /* goto .word_aligned */
1090 1091          subl    $4, %eax                /* post-incremented */
1091 1092  .not_word_aligned:
1092 1093          cmpb    $0, (%eax)              /* if a byte in (%eax) is null */
1093 1094          je      .null_found             /* goto .null_found */
1094 1095          incl    %eax                    /* next byte */
1095 1096          testl   $3, %eax                /* if %eax not word aligned */
1096 1097          jnz     .not_word_aligned       /* goto .not_word_aligned */
1097 1098          jmp     .word_aligned           /* goto .word_aligned */
1098 1099          .align  4
1099 1100  .null_found:
1100 1101          subl    4(%esp), %eax           /* %eax -= string address */
1101 1102          ret
1102 1103          SET_SIZE(strlen)
1103 1104  
1104 1105  #endif  /* __i386 */
1105 1106  
1106 1107  #ifdef DEBUG
1107 1108          .text
1108 1109  .str_panic_msg:
1109 1110          .string "strlen: argument below kernelbase"
1110 1111  #endif /* DEBUG */
1111 1112  
1112 1113  #endif  /* __lint */
1113 1114  
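
The i386 loop above finds a null byte four bytes at a time with a
carry-propagation trick; in C, the per-word test it implements is:

        static int
        has_zero_byte(uint32_t v)
        {
                /*
                 * (v & 0x7f7f7f7f) + 0x7f7f7f7f sets bit 7 of every byte
                 * whose low seven bits are nonzero (no carries cross byte
                 * boundaries, since each byte sums to at most 0xfe).
                 * OR-ing v back in also covers bytes that only had bit 7
                 * set.  A byte's bit 7 therefore ends up clear iff that
                 * byte was 0x00.
                 */
                uint32_t m = ((v & 0x7f7f7f7f) + 0x7f7f7f7f) | v;

                return ((m & 0x80808080) != 0x80808080);
        }
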
1114 1115          /*
1115 1116           * Berkeley 4.3 introduced symbolically named interrupt levels
 1116 1117           * as a way to deal with priority in a machine-independent fashion.
 1117 1118           * Numbered priorities are machine-specific, and their use should
 1118 1119           * be avoided where possible.
1119 1120           *
 1120 1121           * Note that for the machine-specific priorities there are
 1121 1122           * examples listed for devices that use a particular priority.
 1122 1123           * It should not be construed that all devices of that
 1123 1124           * type should be at that priority.  It is simply where
 1124 1125           * the current devices fit into the priority scheme based
 1125 1126           * upon time-criticality.
1126 1127           *
1127 1128           * The underlying assumption of these assignments is that
1128 1129           * IPL 10 is the highest level from which a device
1129 1130           * routine can call wakeup.  Devices that interrupt from higher
1130 1131           * levels are restricted in what they can do.  If they need
 1131 1132           * kernel services, they should schedule a routine at a lower
1132 1133           * level (via software interrupt) to do the required
1133 1134           * processing.
1134 1135           *
1135 1136           * Examples of this higher usage:
1136 1137           *      Level   Usage
1137 1138           *      14      Profiling clock (and PROM uart polling clock)
1138 1139           *      12      Serial ports
1139 1140           *
1140 1141           * The serial ports request lower level processing on level 6.
1141 1142           *
1142 1143           * Also, almost all splN routines (where N is a number or a
1143 1144           * mnemonic) will do a RAISE(), on the assumption that they are
1144 1145           * never used to lower our priority.
1145 1146           * The exceptions are:
1146 1147           *      spl8()          Because you can't be above 15 to begin with!
1147 1148           *      splzs()         Because this is used at boot time to lower our
1148 1149           *                      priority, to allow the PROM to poll the uart.
1149 1150           *      spl0()          Used to lower priority to 0.
1150 1151           */
1151 1152  
1152 1153  #if defined(__lint)
1153 1154  
1154 1155  int spl0(void)          { return (0); }
1155 1156  int spl6(void)          { return (0); }
1156 1157  int spl7(void)          { return (0); }
1157 1158  int spl8(void)          { return (0); }
1158 1159  int splhigh(void)       { return (0); }
1159 1160  int splhi(void)         { return (0); }
1160 1161  int splzs(void)         { return (0); }
1161 1162  
1162 1163  /* ARGSUSED */
1163 1164  void
1164 1165  splx(int level)
1165 1166  {}
1166 1167  
1167 1168  #else   /* __lint */
1168 1169  
1169 1170  #if defined(__amd64)
1170 1171  
1171 1172  #define SETPRI(level) \
1172 1173          movl    $/**/level, %edi;       /* new priority */              \
1173 1174          jmp     do_splx                 /* redirect to do_splx */
1174 1175  
1175 1176  #define RAISE(level) \
1176 1177          movl    $/**/level, %edi;       /* new priority */              \
1177 1178          jmp     splr                    /* redirect to splr */
1178 1179  
1179 1180  #elif defined(__i386)
1180 1181  
1181 1182  #define SETPRI(level) \
1182 1183          pushl   $/**/level;     /* new priority */                      \
1183 1184          call    do_splx;        /* invoke common splx code */           \
1184 1185          addl    $4, %esp;       /* unstack arg */                       \
1185 1186          ret
1186 1187  
1187 1188  #define RAISE(level) \
1188 1189          pushl   $/**/level;     /* new priority */                      \
1189 1190          call    splr;           /* invoke common splr code */           \
1190 1191          addl    $4, %esp;       /* unstack args */                      \
1191 1192          ret
1192 1193  
1193 1194  #endif  /* __i386 */
1194 1195  
1195 1196          /* locks out all interrupts, including memory errors */
1196 1197          ENTRY(spl8)
1197 1198          SETPRI(15)
1198 1199          SET_SIZE(spl8)
1199 1200  
1200 1201          /* just below the level that profiling runs */
1201 1202          ENTRY(spl7)
1202 1203          RAISE(13)
1203 1204          SET_SIZE(spl7)
1204 1205  
1205 1206          /* sun specific - highest priority onboard serial i/o asy ports */
1206 1207          ENTRY(splzs)
1207 1208          SETPRI(12)      /* Can't be a RAISE, as it's used to lower us */
1208 1209          SET_SIZE(splzs)
1209 1210  
1210 1211          ENTRY(splhi)
1211 1212          ALTENTRY(splhigh)
1212 1213          ALTENTRY(spl6)
1213 1214          ALTENTRY(i_ddi_splhigh)
1214 1215  
1215 1216          RAISE(DISP_LEVEL)
1216 1217  
1217 1218          SET_SIZE(i_ddi_splhigh)
1218 1219          SET_SIZE(spl6)
1219 1220          SET_SIZE(splhigh)
1220 1221          SET_SIZE(splhi)
1221 1222  
1222 1223          /* allow all interrupts */
1223 1224          ENTRY(spl0)
1224 1225          SETPRI(0)
1225 1226          SET_SIZE(spl0)
1226 1227  
1227 1228  
1228 1229          /* splx implementation */
1229 1230          ENTRY(splx)
1230 1231          jmp     do_splx         /* redirect to common splx code */
1231 1232          SET_SIZE(splx)
1232 1233  
1233 1234  #endif  /* __lint */
1234 1235  
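
The usual consumption pattern for these routines, as a sketch: raise,
do the critical work, then restore the previous level with splx(), never
with another splN():

        int s;

        s = splhi();            /* raise to DISP_LEVEL, save old level */
        /* ... section that must not be interrupted below that level ... */
        splx(s);                /* restore the saved priority */
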
1235 1236  #if defined(__i386)
1236 1237  
1237 1238  /*
1238 1239   * Read and write the %gs register
1239 1240   */
1240 1241  
1241 1242  #if defined(__lint)
1242 1243  
1243 1244  /*ARGSUSED*/
1244 1245  uint16_t
1245 1246  getgs(void)
1246 1247  { return (0); }
1247 1248  
1248 1249  /*ARGSUSED*/
1249 1250  void
1250 1251  setgs(uint16_t sel)
1251 1252  {}
1252 1253  
1253 1254  #else   /* __lint */
1254 1255  
1255 1256          ENTRY(getgs)
1256 1257          clr     %eax
1257 1258          movw    %gs, %ax
1258 1259          ret
1259 1260          SET_SIZE(getgs)
1260 1261  
1261 1262          ENTRY(setgs)
1262 1263          movw    4(%esp), %gs
1263 1264          ret
1264 1265          SET_SIZE(setgs)
1265 1266  
1266 1267  #endif  /* __lint */
1267 1268  #endif  /* __i386 */
1268 1269  
1269 1270  #if defined(__lint)
1270 1271  
1271 1272  void
1272 1273  pc_reset(void)
1273 1274  {}
1274 1275  
1275 1276  void
1276 1277  efi_reset(void)
1277 1278  {}
1278 1279  
1279 1280  #else   /* __lint */
1280 1281  
1281 1282          ENTRY(wait_500ms)
1282 1283  #if defined(__amd64)
1283 1284          pushq   %rbx
1284 1285  #elif defined(__i386)
1285 1286          push    %ebx
1286 1287  #endif
1287 1288          movl    $50000, %ebx
1288 1289  1:
1289 1290          call    tenmicrosec
1290 1291          decl    %ebx
1291 1292          jnz     1b
1292 1293  #if defined(__amd64)
1293 1294          popq    %rbx
1294 1295  #elif defined(__i386)
1295 1296          pop     %ebx
1296 1297  #endif
1297 1298          ret     
1298 1299          SET_SIZE(wait_500ms)
1299 1300  
1300 1301  #define RESET_METHOD_KBC        1
1301 1302  #define RESET_METHOD_PORT92     2
1302 1303  #define RESET_METHOD_PCI        4
1303 1304  
1304 1305          DGDEF3(pc_reset_methods, 4, 8)
1305 1306          .long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;
1306 1307  
1307 1308          ENTRY(pc_reset)
1308 1309  
1309 1310  #if defined(__i386)
1310 1311          testl   $RESET_METHOD_KBC, pc_reset_methods
1311 1312  #elif defined(__amd64)
1312 1313          testl   $RESET_METHOD_KBC, pc_reset_methods(%rip)
1313 1314  #endif
1314 1315          jz      1f
1315 1316  
1316 1317          /
1317 1318          / Try the classic keyboard controller-triggered reset.
1318 1319          /
1319 1320          movw    $0x64, %dx
1320 1321          movb    $0xfe, %al
1321 1322          outb    (%dx)
1322 1323  
1323 1324          / Wait up to 500 milliseconds here for the keyboard controller
1324 1325          / to pull the reset line.  On some systems where the keyboard
1325 1326          / controller is slow to pull the reset line, the next reset method
1326 1327          / may be executed (which may be bad if those systems hang when the
1327 1328          / next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
1328 1329          / and Ferrari 4000 (doesn't like the cf9 reset method))
1329 1330  
1330 1331          call    wait_500ms
1331 1332  
1332 1333  1:
1333 1334  #if defined(__i386)
1334 1335          testl   $RESET_METHOD_PORT92, pc_reset_methods
1335 1336  #elif defined(__amd64)
1336 1337          testl   $RESET_METHOD_PORT92, pc_reset_methods(%rip)
1337 1338  #endif
1338 1339          jz      3f
1339 1340  
1340 1341          /
1341 1342          / Try port 0x92 fast reset
1342 1343          /
1343 1344          movw    $0x92, %dx
1344 1345          inb     (%dx)
1345 1346          cmpb    $0xff, %al      / If port's not there, we should get back 0xFF
1346 1347          je      1f
1347 1348          testb   $1, %al         / If bit 0
1348 1349          jz      2f              / is clear, jump to perform the reset
1349 1350          andb    $0xfe, %al      / otherwise,
1350 1351          outb    (%dx)           / clear bit 0 first, then
1351 1352  2:
1352 1353          orb     $1, %al         / Set bit 0
1353 1354          outb    (%dx)           / and reset the system
1354 1355  1:
1355 1356  
1356 1357          call    wait_500ms
1357 1358  
1358 1359  3:
1359 1360  #if defined(__i386)
1360 1361          testl   $RESET_METHOD_PCI, pc_reset_methods
1361 1362  #elif defined(__amd64)
1362 1363          testl   $RESET_METHOD_PCI, pc_reset_methods(%rip)
1363 1364  #endif
1364 1365          jz      4f
1365 1366  
1366 1367          / Try the PCI (soft) reset vector (should work on all modern systems,
1367 1368          / but has been shown to cause problems on 450NX systems, and some newer
1368 1369          / systems (e.g. ATI IXP400-equipped systems))
1369 1370          / When resetting via this method, 2 writes are required.  The first
1370 1371          / targets bit 1 (0=hard reset without power cycle, 1=hard reset with
1371 1372          / power cycle).
1372 1373          / The reset occurs on the second write, during bit 2's transition from
1373 1374          / 0->1.
1374 1375          movw    $0xcf9, %dx
1375 1376          movb    $0x2, %al       / Reset mode = hard, no power cycle
1376 1377          outb    (%dx)
1377 1378          movb    $0x6, %al
1378 1379          outb    (%dx)
1379 1380  
1380 1381          call    wait_500ms
1381 1382  
1382 1383  4:
1383 1384          /
1384 1385          / port 0xcf9 failed also.  Last-ditch effort is to
1385 1386          / triple-fault the CPU.
1386 1387          / Also, use triple fault for EFI firmware
1387 1388          /
1388 1389          ENTRY(efi_reset)
1389 1390  #if defined(__amd64)
1390 1391          pushq   $0x0
1391 1392          pushq   $0x0            / IDT base of 0, limit of 0 + 2 unused bytes
1392 1393          lidt    (%rsp)
1393 1394  #elif defined(__i386)
1394 1395          pushl   $0x0
1395 1396          pushl   $0x0            / IDT base of 0, limit of 0 + 2 unused bytes
1396 1397          lidt    (%esp)
1397 1398  #endif
1398 1399          int     $0x0            / Trigger interrupt, generate triple-fault
1399 1400  
1400 1401          cli
1401 1402          hlt                     / Wait forever
1402 1403          /*NOTREACHED*/
1403 1404          SET_SIZE(efi_reset)
1404 1405          SET_SIZE(pc_reset)
1405 1406  
1406 1407  #endif  /* __lint */
1407 1408  
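
For clarity, the port 0xcf9 sequence above rendered in C using the outb()
routine defined later in this file (a sketch mirroring the assembly, not a
replacement for it; the macro name is illustrative):

        #define PCI_RESET_PORT  0xcf9

        static void
        cf9_reset(void)
        {
                outb(PCI_RESET_PORT, 0x2);  /* mode: hard, no power cycle */
                outb(PCI_RESET_PORT, 0x6);  /* bit 2's 0->1 fires the reset */
        }
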
1408 1409  /*
1409 1410   * C callable in and out routines
1410 1411   */
1411 1412  
1412 1413  #if defined(__lint)
1413 1414  
1414 1415  /* ARGSUSED */
1415 1416  void
1416 1417  outl(int port_address, uint32_t val)
1417 1418  {}
1418 1419  
1419 1420  #else   /* __lint */
1420 1421  
1421 1422  #if defined(__amd64)
1422 1423  
1423 1424          ENTRY(outl)
1424 1425          movw    %di, %dx
1425 1426          movl    %esi, %eax
1426 1427          outl    (%dx)
1427 1428          ret
1428 1429          SET_SIZE(outl)
1429 1430  
1430 1431  #elif defined(__i386)
1431 1432  
1432 1433          .set    PORT, 4
1433 1434          .set    VAL, 8
1434 1435  
1435 1436          ENTRY(outl)
1436 1437          movw    PORT(%esp), %dx
1437 1438          movl    VAL(%esp), %eax
1438 1439          outl    (%dx)
1439 1440          ret
1440 1441          SET_SIZE(outl)
1441 1442  
1442 1443  #endif  /* __i386 */
1443 1444  #endif  /* __lint */
1444 1445  
1445 1446  #if defined(__lint)
1446 1447  
1447 1448  /* ARGSUSED */
1448 1449  void
1449 1450  outw(int port_address, uint16_t val)
1450 1451  {}
1451 1452  
1452 1453  #else   /* __lint */
1453 1454  
1454 1455  #if defined(__amd64)
1455 1456  
1456 1457          ENTRY(outw)
1457 1458          movw    %di, %dx
1458 1459          movw    %si, %ax
1459 1460          D16 outl (%dx)          /* XX64 why not outw? */
1460 1461          ret
1461 1462          SET_SIZE(outw)
1462 1463  
1463 1464  #elif defined(__i386)
1464 1465  
1465 1466          ENTRY(outw)
1466 1467          movw    PORT(%esp), %dx
1467 1468          movw    VAL(%esp), %ax
1468 1469          D16 outl (%dx)
1469 1470          ret
1470 1471          SET_SIZE(outw)
1471 1472  
1472 1473  #endif  /* __i386 */
1473 1474  #endif  /* __lint */
1474 1475  
1475 1476  #if defined(__lint)
1476 1477  
1477 1478  /* ARGSUSED */
1478 1479  void
1479 1480  outb(int port_address, uint8_t val)
1480 1481  {}
1481 1482  
1482 1483  #else   /* __lint */
1483 1484  
1484 1485  #if defined(__amd64)
1485 1486  
1486 1487          ENTRY(outb)
1487 1488          movw    %di, %dx
1488 1489          movb    %sil, %al
1489 1490          outb    (%dx)
1490 1491          ret
1491 1492          SET_SIZE(outb)
1492 1493  
1493 1494  #elif defined(__i386)
1494 1495  
1495 1496          ENTRY(outb)
1496 1497          movw    PORT(%esp), %dx
1497 1498          movb    VAL(%esp), %al
1498 1499          outb    (%dx)
1499 1500          ret
1500 1501          SET_SIZE(outb)
1501 1502  
1502 1503  #endif  /* __i386 */
1503 1504  #endif  /* __lint */
1504 1505  
1505 1506  #if defined(__lint)
1506 1507  
1507 1508  /* ARGSUSED */
1508 1509  uint32_t
1509 1510  inl(int port_address)
1510 1511  { return (0); }
1511 1512  
1512 1513  #else   /* __lint */
1513 1514  
1514 1515  #if defined(__amd64)
1515 1516  
1516 1517          ENTRY(inl)
1517 1518          xorl    %eax, %eax
1518 1519          movw    %di, %dx
1519 1520          inl     (%dx)
1520 1521          ret
1521 1522          SET_SIZE(inl)
1522 1523  
1523 1524  #elif defined(__i386)
1524 1525  
1525 1526          ENTRY(inl)
1526 1527          movw    PORT(%esp), %dx
1527 1528          inl     (%dx)
1528 1529          ret
1529 1530          SET_SIZE(inl)
1530 1531  
1531 1532  #endif  /* __i386 */
1532 1533  #endif  /* __lint */
1533 1534  
1534 1535  #if defined(__lint)
1535 1536  
1536 1537  /* ARGSUSED */
1537 1538  uint16_t
1538 1539  inw(int port_address)
1539 1540  { return (0); }
1540 1541  
1541 1542  #else   /* __lint */
1542 1543  
1543 1544  #if defined(__amd64)
1544 1545  
1545 1546          ENTRY(inw)
1546 1547          xorl    %eax, %eax
1547 1548          movw    %di, %dx
1548 1549          D16 inl (%dx)
1549 1550          ret
1550 1551          SET_SIZE(inw)
1551 1552  
1552 1553  #elif defined(__i386)
1553 1554  
1554 1555          ENTRY(inw)
1555 1556          subl    %eax, %eax
1556 1557          movw    PORT(%esp), %dx
1557 1558          D16 inl (%dx)
1558 1559          ret
1559 1560          SET_SIZE(inw)
1560 1561  
1561 1562  #endif  /* __i386 */
1562 1563  #endif  /* __lint */
1563 1564  
1564 1565  
1565 1566  #if defined(__lint)
1566 1567  
1567 1568  /* ARGSUSED */
1568 1569  uint8_t
1569 1570  inb(int port_address)
1570 1571  { return (0); }
1571 1572  
1572 1573  #else   /* __lint */
1573 1574  
1574 1575  #if defined(__amd64)
1575 1576  
1576 1577          ENTRY(inb)
1577 1578          xorl    %eax, %eax
1578 1579          movw    %di, %dx
1579 1580          inb     (%dx)
1580 1581          ret
1581 1582          SET_SIZE(inb)
1582 1583  
1583 1584  #elif defined(__i386)
1584 1585  
1585 1586          ENTRY(inb)
1586 1587          subl    %eax, %eax
1587 1588          movw    PORT(%esp), %dx
1588 1589          inb     (%dx)
1589 1590          ret
1590 1591          SET_SIZE(inb)
1591 1592  
1592 1593  #endif  /* __i386 */
1593 1594  #endif  /* __lint */
1594 1595  
1595 1596  
1596 1597  #if defined(__lint)
1597 1598  
1598 1599  /* ARGSUSED */
1599 1600  void
1600 1601  repoutsw(int port, uint16_t *addr, int cnt)
1601 1602  {}
1602 1603  
1603 1604  #else   /* __lint */
1604 1605  
1605 1606  #if defined(__amd64)
1606 1607  
1607 1608          ENTRY(repoutsw)
1608 1609          movl    %edx, %ecx
1609 1610          movw    %di, %dx
1610 1611          rep
1611 1612            D16 outsl
1612 1613          ret
1613 1614          SET_SIZE(repoutsw)
1614 1615  
1615 1616  #elif defined(__i386)
1616 1617  
1617 1618          /*
1618 1619           * The arguments and saved registers are on the stack in the
1619 1620           *  following order:
1620 1621           *      |  cnt  |  +16
1621 1622           *      | *addr |  +12
1622 1623           *      | port  |  +8
1623 1624           *      |  eip  |  +4
1624 1625           *      |  esi  |  <-- %esp
1625 1626           * If additional values are pushed onto the stack, make sure
1626 1627           * to adjust the following constants accordingly.
1627 1628           */
1628 1629          .set    PORT, 8
1629 1630          .set    ADDR, 12
1630 1631          .set    COUNT, 16
1631 1632  
1632 1633          ENTRY(repoutsw)
1633 1634          pushl   %esi
1634 1635          movl    PORT(%esp), %edx
1635 1636          movl    ADDR(%esp), %esi
1636 1637          movl    COUNT(%esp), %ecx
1637 1638          rep
1638 1639            D16 outsl
1639 1640          popl    %esi
1640 1641          ret
1641 1642          SET_SIZE(repoutsw)
1642 1643  
1643 1644  #endif  /* __i386 */
1644 1645  #endif  /* __lint */
1645 1646  
1646 1647  
1647 1648  #if defined(__lint)
1648 1649  
1649 1650  /* ARGSUSED */
1650 1651  void
1651 1652  repinsw(int port_addr, uint16_t *addr, int cnt)
1652 1653  {}
1653 1654  
1654 1655  #else   /* __lint */
1655 1656  
1656 1657  #if defined(__amd64)
1657 1658  
1658 1659          ENTRY(repinsw)
1659 1660          movl    %edx, %ecx
1660 1661          movw    %di, %dx
1661 1662          rep
1662 1663            D16 insl
1663 1664          ret
1664 1665          SET_SIZE(repinsw)
1665 1666  
1666 1667  #elif defined(__i386)
1667 1668  
1668 1669          ENTRY(repinsw)
1669 1670          pushl   %edi
1670 1671          movl    PORT(%esp), %edx
1671 1672          movl    ADDR(%esp), %edi
1672 1673          movl    COUNT(%esp), %ecx
1673 1674          rep
1674 1675            D16 insl
1675 1676          popl    %edi
1676 1677          ret
1677 1678          SET_SIZE(repinsw)
1678 1679  
1679 1680  #endif  /* __i386 */
1680 1681  #endif  /* __lint */
1681 1682  
1682 1683  
1683 1684  #if defined(__lint)
1684 1685  
1685 1686  /* ARGSUSED */
1686 1687  void
1687 1688  repinsb(int port, uint8_t *addr, int count)
1688 1689  {}
1689 1690  
1690 1691  #else   /* __lint */
1691 1692  
1692 1693  #if defined(__amd64)
1693 1694  
1694 1695          ENTRY(repinsb)
1695 1696          movl    %edx, %ecx      
1696 1697          movw    %di, %dx
1697 1698          movq    %rsi, %rdi
1698 1699          rep
1699 1700            insb
1700 1701          ret             
1701 1702          SET_SIZE(repinsb)
1702 1703  
1703 1704  #elif defined(__i386)
1704 1705          
1705 1706          /*
1706 1707           * The arguments and saved registers are on the stack in the
1707 1708           *  following order:
1708 1709           *      |  cnt  |  +16
1709 1710           *      | *addr |  +12
1710 1711           *      | port  |  +8
1711 1712           *      |  eip  |  +4
1712 1713           *      |  esi  |  <-- %esp
1713 1714           * If additional values are pushed onto the stack, make sure
1714 1715           * to adjust the following constants accordingly.
1715 1716           */
1716 1717          .set    IO_PORT, 8
1717 1718          .set    IO_ADDR, 12
1718 1719          .set    IO_COUNT, 16
1719 1720  
1720 1721          ENTRY(repinsb)
1721 1722          pushl   %edi
1722 1723          movl    IO_ADDR(%esp), %edi
1723 1724          movl    IO_COUNT(%esp), %ecx
1724 1725          movl    IO_PORT(%esp), %edx
1725 1726          rep
1726 1727            insb
1727 1728          popl    %edi
1728 1729          ret
1729 1730          SET_SIZE(repinsb)
1730 1731  
1731 1732  #endif  /* __i386 */
1732 1733  #endif  /* __lint */
1733 1734  
1734 1735  
1735 1736  /*
1736 1737   * Input a stream of 32-bit words.
1737 1738   * NOTE: count is a DWORD count.
1738 1739   */
1739 1740  #if defined(__lint)
1740 1741  
1741 1742  /* ARGSUSED */
1742 1743  void
1743 1744  repinsd(int port, uint32_t *addr, int count)
1744 1745  {}
1745 1746  
1746 1747  #else   /* __lint */
1747 1748  
1748 1749  #if defined(__amd64)
1749 1750          
1750 1751          ENTRY(repinsd)
1751 1752          movl    %edx, %ecx
1752 1753          movw    %di, %dx
1753 1754          movq    %rsi, %rdi
1754 1755          rep
1755 1756            insl
1756 1757          ret
1757 1758          SET_SIZE(repinsd)
1758 1759  
1759 1760  #elif defined(__i386)
1760 1761  
1761 1762          ENTRY(repinsd)
1762 1763          pushl   %edi
1763 1764          movl    IO_ADDR(%esp), %edi
1764 1765          movl    IO_COUNT(%esp), %ecx
1765 1766          movl    IO_PORT(%esp), %edx
1766 1767          rep
1767 1768            insl
1768 1769          popl    %edi
1769 1770          ret
1770 1771          SET_SIZE(repinsd)
1771 1772  
1772 1773  #endif  /* __i386 */
1773 1774  #endif  /* __lint */
1774 1775  
1775 1776  /*
1776 1777   * Output a stream of bytes
1777 1778   * NOTE: count is a byte count
1778 1779   */
1779 1780  #if defined(__lint)
1780 1781  
1781 1782  /* ARGSUSED */
1782 1783  void
1783 1784  repoutsb(int port, uint8_t *addr, int count)
1784 1785  {}
1785 1786  
1786 1787  #else   /* __lint */
1787 1788  
1788 1789  #if defined(__amd64)
1789 1790  
1790 1791          ENTRY(repoutsb)
1791 1792          movl    %edx, %ecx
1792 1793          movw    %di, %dx
1793 1794          rep
1794 1795            outsb
1795 1796          ret     
1796 1797          SET_SIZE(repoutsb)
1797 1798  
1798 1799  #elif defined(__i386)
1799 1800  
1800 1801          ENTRY(repoutsb)
1801 1802          pushl   %esi
1802 1803          movl    IO_ADDR(%esp), %esi
1803 1804          movl    IO_COUNT(%esp), %ecx
1804 1805          movl    IO_PORT(%esp), %edx
1805 1806          rep
1806 1807            outsb
1807 1808          popl    %esi
1808 1809          ret
1809 1810          SET_SIZE(repoutsb)
1810 1811  
1811 1812  #endif  /* __i386 */    
1812 1813  #endif  /* __lint */
1813 1814  
1814 1815  /*
1815 1816   * Output a stream of 32-bit words
1816 1817   * NOTE: count is a DWORD count
1817 1818   */
1818 1819  #if defined(__lint)
1819 1820  
1820 1821  /* ARGSUSED */
1821 1822  void
1822 1823  repoutsd(int port, uint32_t *addr, int count)
1823 1824  {}
1824 1825  
1825 1826  #else   /* __lint */
1826 1827  
1827 1828  #if defined(__amd64)
1828 1829  
1829 1830          ENTRY(repoutsd)
1830 1831          movl    %edx, %ecx
1831 1832          movw    %di, %dx
1832 1833          rep
1833 1834            outsl
1834 1835          ret     
1835 1836          SET_SIZE(repoutsd)
1836 1837  
1837 1838  #elif defined(__i386)
1838 1839  
1839 1840          ENTRY(repoutsd)
1840 1841          pushl   %esi
1841 1842          movl    IO_ADDR(%esp), %esi
1842 1843          movl    IO_COUNT(%esp), %ecx
1843 1844          movl    IO_PORT(%esp), %edx
1844 1845          rep
1845 1846            outsl
1846 1847          popl    %esi
1847 1848          ret
1848 1849          SET_SIZE(repoutsd)
1849 1850  
1850 1851  #endif  /* __i386 */
1851 1852  #endif  /* __lint */
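The rep string routines above move an entire buffer through a port with one instruction. A hedged sketch of typical use (the device and its 32-bit data port are hypothetical; note the count argument is in DWORDs, per the NOTE above):

	#include <sys/types.h>

	extern void repoutsd(int port, uint32_t *addr, int count);

	/* Burst-write a byte buffer out a 32-bit data port. */
	static void
	dev_write_buf(int data_port, uint32_t *buf, size_t nbytes)
	{
		repoutsd(data_port, buf, (int)(nbytes / sizeof (uint32_t)));
	}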
1852 1853  
1853 1854  /*
1854 1855   * void int3(void)
1855 1856   * void int18(void)
1856 1857   * void int20(void)
1857 1858   * void int_cmci(void)
1858 1859   */
1859 1860  
1860 1861  #if defined(__lint)
1861 1862  
1862 1863  void
1863 1864  int3(void)
1864 1865  {}
1865 1866  
1866 1867  void
1867 1868  int18(void)
1868 1869  {}
1869 1870  
1870 1871  void
1871 1872  int20(void)
1872 1873  {}
1873 1874  
1874 1875  void
1875 1876  int_cmci(void)
1876 1877  {}
1877 1878  
1878 1879  #else   /* __lint */
1879 1880  
1880 1881          ENTRY(int3)
1881 1882          int     $T_BPTFLT
1882 1883          ret
1883 1884          SET_SIZE(int3)
1884 1885  
1885 1886          ENTRY(int18)
1886 1887          int     $T_MCE
1887 1888          ret
1888 1889          SET_SIZE(int18)
1889 1890  
1890 1891          ENTRY(int20)
1891 1892          movl    boothowto, %eax
1892 1893          andl    $RB_DEBUG, %eax
1893 1894          jz      1f
1894 1895  
1895 1896          int     $T_DBGENTR
1896 1897  1:
1897 1898          rep;    ret     /* use 2 byte return instruction when branch target */
1898 1899                          /* AMD Software Optimization Guide - Section 6.2 */
1899 1900          SET_SIZE(int20)
1900 1901  
1901 1902          ENTRY(int_cmci)
1902 1903          int     $T_ENOEXTFLT
1903 1904          ret
1904 1905          SET_SIZE(int_cmci)
1905 1906  
1906 1907  #endif  /* __lint */
1907 1908  
1908 1909  #if defined(__lint)
1909 1910  
1910 1911  /* ARGSUSED */
1911 1912  int
1912 1913  scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
1913 1914  { return (0); }
1914 1915  
1915 1916  #else   /* __lint */
1916 1917  
1917 1918  #if defined(__amd64)
1918 1919  
1919 1920          ENTRY(scanc)
1920 1921                                          /* rdi == size */
1921 1922                                          /* rsi == cp */
1922 1923                                          /* rdx == table */
1923 1924                                          /* rcx == mask */
1924 1925          addq    %rsi, %rdi              /* end = &cp[size] */
1925 1926  .scanloop:
1926 1927          cmpq    %rdi, %rsi              /* while (cp < end */
1927 1928          jnb     .scandone
1928 1929          movzbq  (%rsi), %r8             /* %r8 = *cp */
1929 1930          incq    %rsi                    /* cp++ */
1930 1931          testb   %cl, (%r8, %rdx)
1931 1932          jz      .scanloop               /*  && (table[*cp] & mask) == 0) */
1932 1933          decq    %rsi                    /* (fix post-increment) */
1933 1934  .scandone:
1934 1935          movl    %edi, %eax
1935 1936          subl    %esi, %eax              /* return (end - cp) */
1936 1937          ret
1937 1938          SET_SIZE(scanc)
1938 1939  
1939 1940  #elif defined(__i386)
1940 1941  
1941 1942          ENTRY(scanc)
1942 1943          pushl   %edi
1943 1944          pushl   %esi
1944 1945          movb    24(%esp), %cl           /* mask = %cl */
1945 1946          movl    16(%esp), %esi          /* cp = %esi */
1946 1947          movl    20(%esp), %edx          /* table = %edx */
1947 1948          movl    %esi, %edi
1948 1949          addl    12(%esp), %edi          /* end = &cp[size]; */
1949 1950  .scanloop:
1950 1951          cmpl    %edi, %esi              /* while (cp < end */
1951 1952          jnb     .scandone
1952 1953          movzbl  (%esi),  %eax           /* %al = *cp */
1953 1954          incl    %esi                    /* cp++ */
1954 1955          movb    (%edx,  %eax), %al      /* %al = table[*cp] */
1955 1956          testb   %al, %cl
1956 1957          jz      .scanloop               /*   && (table[*cp] & mask) == 0) */
1957 1958          dec     %esi                    /* post-incremented */
1958 1959  .scandone:
1959 1960          movl    %edi, %eax
1960 1961          subl    %esi, %eax              /* return (end - cp) */
1961 1962          popl    %esi
1962 1963          popl    %edi
1963 1964          ret
1964 1965          SET_SIZE(scanc)
1965 1966  
1966 1967  #endif  /* __i386 */
1967 1968  #endif  /* __lint */
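The register-level comments in scanc() spell out the loop being implemented; a minimal C restatement (a reference sketch, not code from this file):

	#include <sys/types.h>

	/*
	 * Scan cp[0..size-1] until a byte whose table entry shares a bit
	 * with mask.  Returns the number of bytes remaining, counting the
	 * matching byte itself, or 0 if nothing matched.
	 */
	static int
	scanc_ref(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
	{
		uchar_t *end = cp + size;

		while (cp < end && (table[*cp] & mask) == 0)
			cp++;
		return ((int)(end - cp));
	}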
1968 1969  
1969 1970  /*
1970 1971   * Replacement functions for ones that are normally inlined.
1971 1972   * In addition to the copy in i86.il, they are defined here just in case.
1972 1973   */
1973 1974  
1974 1975  #if defined(__lint)
1975 1976  
1976 1977  ulong_t
1977 1978  intr_clear(void)
1978 1979  { return (0); }
1979 1980  
1980 1981  ulong_t
1981 1982  clear_int_flag(void)
1982 1983  { return (0); }
1983 1984  
1984 1985  #else   /* __lint */
1985 1986  
1986 1987  #if defined(__amd64)
1987 1988  
1988 1989          ENTRY(intr_clear)
1989 1990          ENTRY(clear_int_flag)
1990 1991          pushfq
1991 1992          popq    %rax
1992 1993  #if defined(__xpv)
1993 1994          leaq    xpv_panicking, %rdi
1994 1995          movl    (%rdi), %edi
1995 1996          cmpl    $0, %edi
1996 1997          jne     2f
1997 1998          CLIRET(%rdi, %dl)       /* returns event mask in %dl */
1998 1999          /*
1999 2000           * Synthesize the PS_IE bit from the event mask bit
2000 2001           */
2001 2002          andq    $_BITNOT(PS_IE), %rax
2002 2003          testb   $1, %dl
2003 2004          jnz     1f
2004 2005          orq     $PS_IE, %rax
2005 2006  1:
2006 2007          ret
2007 2008  2:
2008 2009  #endif
2009 2010          CLI(%rdi)
2010 2011          ret
2011 2012          SET_SIZE(clear_int_flag)
2012 2013          SET_SIZE(intr_clear)
2013 2014  
2014 2015  #elif defined(__i386)
2015 2016  
2016 2017          ENTRY(intr_clear)
2017 2018          ENTRY(clear_int_flag)
2018 2019          pushfl
2019 2020          popl    %eax
2020 2021  #if defined(__xpv)
2021 2022          leal    xpv_panicking, %edx
2022 2023          movl    (%edx), %edx
2023 2024          cmpl    $0, %edx
2024 2025          jne     2f
2025 2026          CLIRET(%edx, %cl)       /* returns event mask in %cl */
2026 2027          /*
2027 2028           * Synthesize the PS_IE bit from the event mask bit
2028 2029           */
2029 2030          andl    $_BITNOT(PS_IE), %eax
2030 2031          testb   $1, %cl
2031 2032          jnz     1f
2032 2033          orl     $PS_IE, %eax
2033 2034  1:
2034 2035          ret
2035 2036  2:
2036 2037  #endif
2037 2038          CLI(%edx)
2038 2039          ret
2039 2040          SET_SIZE(clear_int_flag)
2040 2041          SET_SIZE(intr_clear)
2041 2042  
2042 2043  #endif  /* __i386 */
2043 2044  #endif  /* __lint */
2044 2045  
2045 2046  #if defined(__lint)
2046 2047  
2047 2048  struct cpu *
2048 2049  curcpup(void)
2049 2050  { return ((struct cpu *)0); }
2050 2051  
2051 2052  #else   /* __lint */
2052 2053  
2053 2054  #if defined(__amd64)
2054 2055  
2055 2056          ENTRY(curcpup)
2056 2057          movq    %gs:CPU_SELF, %rax
2057 2058          ret
2058 2059          SET_SIZE(curcpup)
2059 2060  
2060 2061  #elif defined(__i386)
2061 2062  
2062 2063          ENTRY(curcpup)
2063 2064          movl    %gs:CPU_SELF, %eax
2064 2065          ret
2065 2066          SET_SIZE(curcpup)
2066 2067  
2067 2068  #endif  /* __i386 */
2068 2069  #endif  /* __lint */
2069 2070  
2070 2071  /* htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
2071 2072   * These functions reverse the byte order of the input parameter and return
2072 2073   * the result.  This converts between host byte order (little endian) and
2073 2074   * network byte order (big endian), in either direction.
2074 2075   */
2075 2076  
2076 2077  #if defined(__lint)
2077 2078  
2078 2079  uint64_t
2079 2080  htonll(uint64_t i)
2080 2081  { return (i); }
2081 2082  
2082 2083  uint64_t
2083 2084  ntohll(uint64_t i)
2084 2085  { return (i); }
2085 2086  
2086 2087  uint32_t
2087 2088  htonl(uint32_t i)
2088 2089  { return (i); }
2089 2090  
2090 2091  uint32_t
2091 2092  ntohl(uint32_t i)
2092 2093  { return (i); }
2093 2094  
2094 2095  uint16_t
2095 2096  htons(uint16_t i)
2096 2097  { return (i); }
2097 2098  
2098 2099  uint16_t
2099 2100  ntohs(uint16_t i)
2100 2101  { return (i); }
2101 2102  
2102 2103  #else   /* __lint */
2103 2104  
2104 2105  #if defined(__amd64)
2105 2106  
2106 2107          ENTRY(htonll)
2107 2108          ALTENTRY(ntohll)
2108 2109          movq    %rdi, %rax
2109 2110          bswapq  %rax
2110 2111          ret
2111 2112          SET_SIZE(ntohll)
2112 2113          SET_SIZE(htonll)
2113 2114  
2114 2115          /* XX64 there must be shorter sequences for this */
2115 2116          ENTRY(htonl)
2116 2117          ALTENTRY(ntohl)
2117 2118          movl    %edi, %eax
2118 2119          bswap   %eax
2119 2120          ret
2120 2121          SET_SIZE(ntohl)
2121 2122          SET_SIZE(htonl)
2122 2123  
2123 2124          /* XX64 there must be better sequences for this */
2124 2125          ENTRY(htons)
2125 2126          ALTENTRY(ntohs)
2126 2127          movl    %edi, %eax
2127 2128          bswap   %eax
2128 2129          shrl    $16, %eax
2129 2130          ret
2130 2131          SET_SIZE(ntohs)
2131 2132          SET_SIZE(htons)
2132 2133  
2133 2134  #elif defined(__i386)
2134 2135  
2135 2136          ENTRY(htonll)
2136 2137          ALTENTRY(ntohll)
2137 2138          movl    4(%esp), %edx
2138 2139          movl    8(%esp), %eax
2139 2140          bswap   %edx
2140 2141          bswap   %eax
2141 2142          ret
2142 2143          SET_SIZE(ntohll)
2143 2144          SET_SIZE(htonll)
2144 2145  
2145 2146          ENTRY(htonl)
2146 2147          ALTENTRY(ntohl)
2147 2148          movl    4(%esp), %eax
2148 2149          bswap   %eax
2149 2150          ret
2150 2151          SET_SIZE(ntohl)
2151 2152          SET_SIZE(htonl)
2152 2153  
2153 2154          ENTRY(htons)
2154 2155          ALTENTRY(ntohs)
2155 2156          movl    4(%esp), %eax
2156 2157          bswap   %eax
2157 2158          shrl    $16, %eax
2158 2159          ret
2159 2160          SET_SIZE(ntohs)
2160 2161          SET_SIZE(htons)
2161 2162  
2162 2163  #endif  /* __i386 */
2163 2164  #endif  /* __lint */
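Since x86 is little-endian, each routine is a pure byte swap. A hedged illustration with example constants (values chosen for the demo, not from the source):

	#include <sys/types.h>

	extern uint32_t htonl(uint32_t);
	extern uint16_t htons(uint16_t);

	static void
	byteorder_demo(void)
	{
		/* value 0x78563412; its little-endian bytes are 12 34 56 78 */
		uint32_t l = htonl(0x12345678);
		/* value 0x3412 */
		uint16_t s = htons(0x1234);

		(void) l;
		(void) s;
	}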
2164 2165  
2165 2166  
2166 2167  #if defined(__lint)
2167 2168  
2168 2169  /* ARGSUSED */
2169 2170  void
2170 2171  intr_restore(ulong_t i)
2171 2172  { return; }
2172 2173  
2173 2174  /* ARGSUSED */
2174 2175  void
2175 2176  restore_int_flag(ulong_t i)
2176 2177  { return; }
2177 2178  
2178 2179  #else   /* __lint */
2179 2180  
2180 2181  #if defined(__amd64)
2181 2182  
2182 2183          ENTRY(intr_restore)
2183 2184          ENTRY(restore_int_flag)
2184 2185          testq   $PS_IE, %rdi
2185 2186          jz      1f
2186 2187  #if defined(__xpv)
2187 2188          leaq    xpv_panicking, %rsi
2188 2189          movl    (%rsi), %esi
2189 2190          cmpl    $0, %esi
2190 2191          jne     1f
2191 2192          /*
2192 2193           * Since we're -really- running unprivileged, our attempt
2193 2194           * to change the state of the IF bit will be ignored.
2194 2195           * The virtual IF bit is tweaked by CLI and STI.
2195 2196           */
2196 2197          IE_TO_EVENT_MASK(%rsi, %rdi)
2197 2198  #else
2198 2199          sti
2199 2200  #endif
2200 2201  1:
2201 2202          ret
2202 2203          SET_SIZE(restore_int_flag)
2203 2204          SET_SIZE(intr_restore)
2204 2205  
2205 2206  #elif defined(__i386)
2206 2207  
2207 2208          ENTRY(intr_restore)
2208 2209          ENTRY(restore_int_flag)
2209 2210          testl   $PS_IE, 4(%esp)
2210 2211          jz      1f
2211 2212  #if defined(__xpv)
2212 2213          leal    xpv_panicking, %edx
2213 2214          movl    (%edx), %edx
2214 2215          cmpl    $0, %edx
2215 2216          jne     1f
2216 2217          /*
2217 2218           * Since we're -really- running unprivileged, our attempt
2218 2219           * to change the state of the IF bit will be ignored.
2219 2220           * The virtual IF bit is tweaked by CLI and STI.
2220 2221           */
2221 2222          IE_TO_EVENT_MASK(%edx, 4(%esp))
2222 2223  #else
2223 2224          sti
2224 2225  #endif
2225 2226  1:
2226 2227          ret
2227 2228          SET_SIZE(restore_int_flag)
2228 2229          SET_SIZE(intr_restore)
2229 2230  
2230 2231  #endif  /* __i386 */
2231 2232  #endif  /* __lint */
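intr_clear() returns the previous flags word, with PS_IE synthesized from the event mask when running under Xen, and intr_restore() re-enables interrupts only if PS_IE is set in that word. A hedged sketch of the canonical pairing:

	#include <sys/types.h>

	extern ulong_t intr_clear(void);
	extern void intr_restore(ulong_t);

	static void
	critical_example(void)
	{
		ulong_t flags = intr_clear();	/* mask interrupts, save state */

		/* ... short critical section ... */

		intr_restore(flags);	/* unmask only if previously enabled */
	}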
2232 2233  
2233 2234  #if defined(__lint)
2234 2235  
2235 2236  void
2236 2237  sti(void)
2237 2238  {}
2238 2239  
2239 2240  void
2240 2241  cli(void)
2241 2242  {}
2242 2243  
2243 2244  #else   /* __lint */
2244 2245  
2245 2246          ENTRY(sti)
2246 2247          STI
2247 2248          ret
2248 2249          SET_SIZE(sti)
2249 2250  
2250 2251          ENTRY(cli)
2251 2252  #if defined(__amd64)
2252 2253          CLI(%rax)
2253 2254  #elif defined(__i386)
2254 2255          CLI(%eax)
2255 2256  #endif  /* __i386 */
2256 2257          ret
2257 2258          SET_SIZE(cli)
2258 2259  
2259 2260  #endif  /* __lint */
2260 2261  
2261 2262  #if defined(__lint)
2262 2263  
2263 2264  dtrace_icookie_t
2264 2265  dtrace_interrupt_disable(void)
2265 2266  { return (0); }
2266 2267  
2267 2268  #else   /* __lint */
2268 2269  
2269 2270  #if defined(__amd64)
2270 2271  
2271 2272          ENTRY(dtrace_interrupt_disable)
2272 2273          pushfq
2273 2274          popq    %rax
2274 2275  #if defined(__xpv)
2275 2276          leaq    xpv_panicking, %rdi
2276 2277          movl    (%rdi), %edi
2277 2278          cmpl    $0, %edi
2278 2279          jne     .dtrace_interrupt_disable_done
2279 2280          CLIRET(%rdi, %dl)       /* returns event mask in %dl */
2280 2281          /*
2281 2282           * Synthesize the PS_IE bit from the event mask bit
2282 2283           */
2283 2284          andq    $_BITNOT(PS_IE), %rax
2284 2285          testb   $1, %dl
2285 2286          jnz     .dtrace_interrupt_disable_done
2286 2287          orq     $PS_IE, %rax
2287 2288  #else
2288 2289          CLI(%rdx)
2289 2290  #endif
2290 2291  .dtrace_interrupt_disable_done:
2291 2292          ret
2292 2293          SET_SIZE(dtrace_interrupt_disable)
2293 2294  
2294 2295  #elif defined(__i386)
2295 2296                  
2296 2297          ENTRY(dtrace_interrupt_disable)
2297 2298          pushfl
2298 2299          popl    %eax
2299 2300  #if defined(__xpv)
2300 2301          leal    xpv_panicking, %edx
2301 2302          movl    (%edx), %edx
2302 2303          cmpl    $0, %edx
2303 2304          jne     .dtrace_interrupt_disable_done
2304 2305          CLIRET(%edx, %cl)       /* returns event mask in %cl */
2305 2306          /*
2306 2307           * Synthesize the PS_IE bit from the event mask bit
2307 2308           */
2308 2309          andl    $_BITNOT(PS_IE), %eax
2309 2310          testb   $1, %cl
2310 2311          jnz     .dtrace_interrupt_disable_done
2311 2312          orl     $PS_IE, %eax
2312 2313  #else
2313 2314          CLI(%edx)
2314 2315  #endif
2315 2316  .dtrace_interrupt_disable_done:
2316 2317          ret
2317 2318          SET_SIZE(dtrace_interrupt_disable)
2318 2319  
2319 2320  #endif  /* __i386 */    
2320 2321  #endif  /* __lint */
2321 2322  
2322 2323  #if defined(__lint)
2323 2324  
2324 2325  /*ARGSUSED*/
2325 2326  void
2326 2327  dtrace_interrupt_enable(dtrace_icookie_t cookie)
2327 2328  {}
2328 2329  
2329 2330  #else   /* __lint */
2330 2331  
2331 2332  #if defined(__amd64)
2332 2333  
2333 2334          ENTRY(dtrace_interrupt_enable)
2334 2335          pushq   %rdi
2335 2336          popfq
2336 2337  #if defined(__xpv)
2337 2338          leaq    xpv_panicking, %rdx
2338 2339          movl    (%rdx), %edx
2339 2340          cmpl    $0, %edx
2340 2341          jne     .dtrace_interrupt_enable_done
2341 2342          /*
2342 2343           * Since we're -really- running unprivileged, our attempt
2343 2344           * to change the state of the IF bit will be ignored. The
2344 2345           * virtual IF bit is tweaked by CLI and STI.
2345 2346           */
2346 2347          IE_TO_EVENT_MASK(%rdx, %rdi)
2347 2348  #endif
2348 2349  .dtrace_interrupt_enable_done:
2349 2350          ret
2350 2351          SET_SIZE(dtrace_interrupt_enable)
2351 2352  
2352 2353  #elif defined(__i386)
2353 2354                  
2354 2355          ENTRY(dtrace_interrupt_enable)
2355 2356          movl    4(%esp), %eax
2356 2357          pushl   %eax
2357 2358          popfl
2358 2359  #if defined(__xpv)
2359 2360          leal    xpv_panicking, %edx
2360 2361          movl    (%edx), %edx
2361 2362          cmpl    $0, %edx
2362 2363          jne     .dtrace_interrupt_enable_done
2363 2364          /*
2364 2365           * Since we're -really- running unprivileged, our attempt
2365 2366           * to change the state of the IF bit will be ignored. The
2366 2367           * virtual IF bit is tweaked by CLI and STI.
2367 2368           */
2368 2369          IE_TO_EVENT_MASK(%edx, %eax)
2369 2370  #endif
2370 2371  .dtrace_interrupt_enable_done:
2371 2372          ret
2372 2373          SET_SIZE(dtrace_interrupt_enable)
2373 2374  
2374 2375  #endif  /* __i386 */    
2375 2376  #endif  /* __lint */
2376 2377  
2377 2378  
2378 2379  #if defined(lint)
2379 2380  
2380 2381  void
2381 2382  dtrace_membar_producer(void)
2382 2383  {}
2383 2384  
2384 2385  void
2385 2386  dtrace_membar_consumer(void)
2386 2387  {}
2387 2388  
2388 2389  #else   /* __lint */
2389 2390  
2390 2391          ENTRY(dtrace_membar_producer)
2391 2392          rep;    ret     /* use 2 byte return instruction when branch target */
2392 2393                          /* AMD Software Optimization Guide - Section 6.2 */
2393 2394          SET_SIZE(dtrace_membar_producer)
2394 2395  
2395 2396          ENTRY(dtrace_membar_consumer)
2396 2397          rep;    ret     /* use 2 byte return instruction when branch target */
2397 2398                          /* AMD Software Optimization Guide - Section 6.2 */
2398 2399          SET_SIZE(dtrace_membar_consumer)
2399 2400  
2400 2401  #endif  /* __lint */
2401 2402  
2402 2403  #if defined(__lint)
2403 2404  
2404 2405  kthread_id_t
2405 2406  threadp(void)
2406 2407  { return ((kthread_id_t)0); }
2407 2408  
2408 2409  #else   /* __lint */
2409 2410  
2410 2411  #if defined(__amd64)
2411 2412          
2412 2413          ENTRY(threadp)
2413 2414          movq    %gs:CPU_THREAD, %rax
2414 2415          ret
2415 2416          SET_SIZE(threadp)
2416 2417  
2417 2418  #elif defined(__i386)
2418 2419  
2419 2420          ENTRY(threadp)
2420 2421          movl    %gs:CPU_THREAD, %eax
2421 2422          ret
2422 2423          SET_SIZE(threadp)
2423 2424  
2424 2425  #endif  /* __i386 */
2425 2426  #endif  /* __lint */
2426 2427  
2427 2428  /*
2428 2429   *   Checksum routine for Internet Protocol Headers
2429 2430   */
2430 2431  
2431 2432  #if defined(__lint)
2432 2433  
2433 2434  /* ARGSUSED */
2434 2435  unsigned int
2435 2436  ip_ocsum(
2436 2437          ushort_t *address,      /* ptr to 1st message buffer */
2437 2438          int halfword_count,     /* length of data */
2438 2439          unsigned int sum)       /* partial checksum */
2439 2440  { 
2440 2441          int             i;
2441 2442          unsigned int    psum = 0;       /* partial sum */
2442 2443  
2443 2444          for (i = 0; i < halfword_count; i++, address++) {
2444 2445                  psum += *address;
2445 2446          }
2446 2447  
2447 2448          while ((psum >> 16) != 0) {
2448 2449                  psum = (psum & 0xffff) + (psum >> 16);
2449 2450          }
2450 2451  
2451 2452          psum += sum;
2452 2453  
2453 2454          while ((psum >> 16) != 0) {
2454 2455                  psum = (psum & 0xffff) + (psum >> 16);
2455 2456          }
2456 2457  
2457 2458          return (psum);
2458 2459  }
2459 2460  
2460 2461  #else   /* __lint */
2461 2462  
2462 2463  #if defined(__amd64)
2463 2464  
2464 2465          ENTRY(ip_ocsum)
2465 2466          pushq   %rbp
2466 2467          movq    %rsp, %rbp
2467 2468  #ifdef DEBUG
2468 2469          movq    postbootkernelbase(%rip), %rax
2469 2470          cmpq    %rax, %rdi
2470 2471          jnb     1f
2471 2472          xorl    %eax, %eax
2472 2473          movq    %rdi, %rsi
2473 2474          leaq    .ip_ocsum_panic_msg(%rip), %rdi
2474 2475          call    panic
2475 2476          /*NOTREACHED*/
2476 2477  .ip_ocsum_panic_msg:
2477 2478          .string "ip_ocsum: address 0x%p below kernelbase\n"
2478 2479  1:
2479 2480  #endif
2480 2481          movl    %esi, %ecx      /* halfword_count */
2481 2482          movq    %rdi, %rsi      /* address */
2482 2483                                  /* partial sum in %edx */
2483 2484          xorl    %eax, %eax
2484 2485          testl   %ecx, %ecx
2485 2486          jz      .ip_ocsum_done
2486 2487          testq   $3, %rsi
2487 2488          jnz     .ip_csum_notaligned
2488 2489  .ip_csum_aligned:       /* XX64 opportunities for 8-byte operations? */
2489 2490  .next_iter:
2490 2491          /* XX64 opportunities for prefetch? */
2491 2492          /* XX64 compute csum with 64 bit quantities? */
2492 2493          subl    $32, %ecx
2493 2494          jl      .less_than_32
2494 2495  
2495 2496          addl    0(%rsi), %edx
2496 2497  .only60:
2497 2498          adcl    4(%rsi), %eax
2498 2499  .only56:
2499 2500          adcl    8(%rsi), %edx
2500 2501  .only52:
2501 2502          adcl    12(%rsi), %eax
2502 2503  .only48:
2503 2504          adcl    16(%rsi), %edx
2504 2505  .only44:
2505 2506          adcl    20(%rsi), %eax
2506 2507  .only40:
2507 2508          adcl    24(%rsi), %edx
2508 2509  .only36:
2509 2510          adcl    28(%rsi), %eax
2510 2511  .only32:
2511 2512          adcl    32(%rsi), %edx
2512 2513  .only28:
2513 2514          adcl    36(%rsi), %eax
2514 2515  .only24:
2515 2516          adcl    40(%rsi), %edx
2516 2517  .only20:
2517 2518          adcl    44(%rsi), %eax
2518 2519  .only16:
2519 2520          adcl    48(%rsi), %edx
2520 2521  .only12:
2521 2522          adcl    52(%rsi), %eax
2522 2523  .only8:
2523 2524          adcl    56(%rsi), %edx
2524 2525  .only4:
2525 2526          adcl    60(%rsi), %eax  /* could be adding -1 and -1 with a carry */
2526 2527  .only0:
2527 2528          adcl    $0, %eax        /* could be adding -1 in eax with a carry */
2528 2529          adcl    $0, %eax
2529 2530  
2530 2531          addq    $64, %rsi
2531 2532          testl   %ecx, %ecx
2532 2533          jnz     .next_iter
2533 2534  
2534 2535  .ip_ocsum_done:
2535 2536          addl    %eax, %edx
2536 2537          adcl    $0, %edx
2537 2538          movl    %edx, %eax      /* form a 16 bit checksum by */
2538 2539          shrl    $16, %eax       /* adding two halves of 32 bit checksum */
2539 2540          addw    %dx, %ax
2540 2541          adcw    $0, %ax
2541 2542          andl    $0xffff, %eax
2542 2543          leave
2543 2544          ret
2544 2545  
2545 2546  .ip_csum_notaligned:
2546 2547          xorl    %edi, %edi
2547 2548          movw    (%rsi), %di
2548 2549          addl    %edi, %edx
2549 2550          adcl    $0, %edx
2550 2551          addq    $2, %rsi
2551 2552          decl    %ecx
2552 2553          jmp     .ip_csum_aligned
2553 2554  
2554 2555  .less_than_32:
2555 2556          addl    $32, %ecx       /* %ecx = remaining halfwords */
2556 2557          testl   $1, %ecx
2557 2558          jz      .size_aligned
2558 2559          andl    $0xfe, %ecx     /* odd count: fold in the last halfword */
2559 2560          movzwl  (%rsi, %rcx, 2), %edi
2560 2561          addl    %edi, %edx
2561 2562          adcl    $0, %edx
2562 2563  .size_aligned:
2563 2564          movl    %ecx, %edi
2564 2565          shrl    $1, %ecx        /* %ecx = remaining dwords */
2565 2566          shl     $1, %edi        /* %edi = remaining bytes */
2566 2567          subq    $64, %rdi       /* bias %rsi so the .onlyN offsets land */
2567 2568          addq    %rdi, %rsi
2568 2569          leaq    .ip_ocsum_jmptbl(%rip), %rdi
2569 2570          leaq    (%rdi, %rcx, 8), %rdi   /* jump-table entry for N dwords */
2570 2571          xorl    %ecx, %ecx
2571 2572          clc                     /* enter the unrolled adds carry-clear */
2572 2573          jmp     *(%rdi)
2573 2574  
2574 2575          .align  8
2575 2576  .ip_ocsum_jmptbl:
2576 2577          .quad   .only0, .only4, .only8, .only12, .only16, .only20
2577 2578          .quad   .only24, .only28, .only32, .only36, .only40, .only44
2578 2579          .quad   .only48, .only52, .only56, .only60
2579 2580          SET_SIZE(ip_ocsum)
2580 2581  
2581 2582  #elif defined(__i386)
2582 2583  
2583 2584          ENTRY(ip_ocsum)
2584 2585          pushl   %ebp
2585 2586          movl    %esp, %ebp
2586 2587          pushl   %ebx
2587 2588          pushl   %esi
2588 2589          pushl   %edi
2589 2590          movl    12(%ebp), %ecx  /* count of half words */
2590 2591          movl    16(%ebp), %edx  /* partial checksum */
2591 2592          movl    8(%ebp), %esi
2592 2593          xorl    %eax, %eax
2593 2594          testl   %ecx, %ecx
2594 2595          jz      .ip_ocsum_done
2595 2596  
2596 2597          testl   $3, %esi
2597 2598          jnz     .ip_csum_notaligned
2598 2599  .ip_csum_aligned:
2599 2600  .next_iter:
2600 2601          subl    $32, %ecx
2601 2602          jl      .less_than_32
2602 2603  
2603 2604          addl    0(%esi), %edx
2604 2605  .only60:
2605 2606          adcl    4(%esi), %eax
2606 2607  .only56:
2607 2608          adcl    8(%esi), %edx
2608 2609  .only52:
2609 2610          adcl    12(%esi), %eax
2610 2611  .only48:
2611 2612          adcl    16(%esi), %edx
2612 2613  .only44:
2613 2614          adcl    20(%esi), %eax
2614 2615  .only40:
2615 2616          adcl    24(%esi), %edx
2616 2617  .only36:
2617 2618          adcl    28(%esi), %eax
2618 2619  .only32:
2619 2620          adcl    32(%esi), %edx
2620 2621  .only28:
2621 2622          adcl    36(%esi), %eax
2622 2623  .only24:
2623 2624          adcl    40(%esi), %edx
2624 2625  .only20:
2625 2626          adcl    44(%esi), %eax
2626 2627  .only16:
2627 2628          adcl    48(%esi), %edx
2628 2629  .only12:
2629 2630          adcl    52(%esi), %eax
2630 2631  .only8:
2631 2632          adcl    56(%esi), %edx
2632 2633  .only4:
2633 2634          adcl    60(%esi), %eax  /* We could be adding -1 and -1 with a carry */
2634 2635  .only0:
2635 2636          adcl    $0, %eax        /* we could be adding -1 in eax with a carry */
2636 2637          adcl    $0, %eax
2637 2638  
2638 2639          addl    $64, %esi
2639 2640          andl    %ecx, %ecx
2640 2641          jnz     .next_iter
2641 2642  
2642 2643  .ip_ocsum_done:
2643 2644          addl    %eax, %edx
2644 2645          adcl    $0, %edx
2645 2646          movl    %edx, %eax      /* form a 16 bit checksum by */
2646 2647          shrl    $16, %eax       /* adding two halves of 32 bit checksum */
2647 2648          addw    %dx, %ax
2648 2649          adcw    $0, %ax
2649 2650          andl    $0xffff, %eax
2650 2651          popl    %edi            /* restore registers */
2651 2652          popl    %esi
2652 2653          popl    %ebx
2653 2654          leave
2654 2655          ret
2655 2656  
2656 2657  .ip_csum_notaligned:
2657 2658          xorl    %edi, %edi
2658 2659          movw    (%esi), %di
2659 2660          addl    %edi, %edx
2660 2661          adcl    $0, %edx
2661 2662          addl    $2, %esi
2662 2663          decl    %ecx
2663 2664          jmp     .ip_csum_aligned
2664 2665  
2665 2666  .less_than_32:
2666 2667          addl    $32, %ecx
2667 2668          testl   $1, %ecx
2668 2669          jz      .size_aligned
2669 2670          andl    $0xfe, %ecx
2670 2671          movzwl  (%esi, %ecx, 2), %edi
2671 2672          addl    %edi, %edx
2672 2673          adcl    $0, %edx
2673 2674  .size_aligned:
2674 2675          movl    %ecx, %edi
2675 2676          shrl    $1, %ecx
2676 2677          shl     $1, %edi
2677 2678          subl    $64, %edi
2678 2679          addl    %edi, %esi
2679 2680          movl    $.ip_ocsum_jmptbl, %edi
2680 2681          lea     (%edi, %ecx, 4), %edi
2681 2682          xorl    %ecx, %ecx
2682 2683          clc
2683 2684          jmp     *(%edi)
2684 2685          SET_SIZE(ip_ocsum)
2685 2686  
2686 2687          .data
2687 2688          .align  4
2688 2689  
2689 2690  .ip_ocsum_jmptbl:
2690 2691          .long   .only0, .only4, .only8, .only12, .only16, .only20
2691 2692          .long   .only24, .only28, .only32, .only36, .only40, .only44
2692 2693          .long   .only48, .only52, .only56, .only60
2693 2694  
2694 2695          
2695 2696  #endif  /* __i386 */            
2696 2697  #endif  /* __lint */
2697 2698  
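The lint body above doubles as a reference implementation: a 16-bit ones'-complement sum with end-around-carry folding. A worked example with hypothetical halfwords { 0xffff, 0x0002 } and a zero partial sum:

	psum = 0xffff + 0x0002 = 0x10001
	psum = (0x10001 & 0xffff) + (0x10001 >> 16) = 0x0001 + 0x0001 = 0x0002

ip_ocsum() stops at the folded sum; callers wanting the on-wire Internet checksum typically complement the result themselves.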
2698 2699  /*
2699 2700   * multiply two long numbers and yield a u_longlong_t result, callable from C.
2700 2701   * Provided to manipulate hrtime_t values.
2701 2702   */
2702 2703  #if defined(__lint)
2703 2704  
2704 2705  /* result = a * b; */
2705 2706  
2706 2707  /* ARGSUSED */
2707 2708  unsigned long long
2708 2709  mul32(uint_t a, uint_t b)
2709 2710  { return (0); }
2710 2711  
2711 2712  #else   /* __lint */
2712 2713  
2713 2714  #if defined(__amd64)
2714 2715  
2715 2716          ENTRY(mul32)
2716 2717          xorl    %edx, %edx      /* XX64 joe, paranoia? */
2717 2718          movl    %edi, %eax
2718 2719          mull    %esi
2719 2720          shlq    $32, %rdx       
2720 2721          orq     %rdx, %rax
2721 2722          ret
2722 2723          SET_SIZE(mul32)
2723 2724  
2724 2725  #elif defined(__i386)
2725 2726  
2726 2727          ENTRY(mul32)
2727 2728          movl    8(%esp), %eax
2728 2729          movl    4(%esp), %ecx
2729 2730          mull    %ecx
2730 2731          ret
2731 2732          SET_SIZE(mul32)
2732 2733  
2733 2734  #endif  /* __i386 */
2734 2735  #endif  /* __lint */
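mul32() is simply the full 64-bit product of two 32-bit operands, as the mull instruction computes. A one-line C restatement plus the kind of hrtime_t scaling it exists for (nsec_per_tick is a hypothetical name):

	#include <sys/types.h>

	static unsigned long long
	mul32_ref(uint_t a, uint_t b)
	{
		return ((unsigned long long)a * b);	/* no truncation */
	}

	/* e.g.:  hrtime_t ns = (hrtime_t)mul32_ref(ticks, nsec_per_tick); */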
2735 2736  
2736 2737  #if defined(notused)
2737 2738  #if defined(__lint)
2738 2739  /* ARGSUSED */
2739 2740  void
2740 2741  load_pte64(uint64_t *pte, uint64_t pte_value)
2741 2742  {}
2742 2743  #else   /* __lint */
2743 2744          .globl load_pte64
2744 2745  load_pte64:
2745 2746          movl    4(%esp), %eax
2746 2747          movl    8(%esp), %ecx
2747 2748          movl    12(%esp), %edx
2748 2749          movl    %edx, 4(%eax)
2749 2750          movl    %ecx, (%eax)
2750 2751          ret
2751 2752  #endif  /* __lint */
2752 2753  #endif  /* notused */
2753 2754  
2754 2755  #if defined(__lint)
2755 2756  
2756 2757  /*ARGSUSED*/
2757 2758  void
2758 2759  scan_memory(caddr_t addr, size_t size)
2759 2760  {}
2760 2761  
2761 2762  #else   /* __lint */
2762 2763  
2763 2764  #if defined(__amd64)
2764 2765  
2765 2766          ENTRY(scan_memory)
2766 2767          shrq    $3, %rsi        /* convert %rsi from byte to quadword count */
2767 2768          jz      .scanm_done
2768 2769          movq    %rsi, %rcx      /* move count into rep control register */
2769 2770          movq    %rdi, %rsi      /* move addr into lodsq control reg. */
2770 2771          rep lodsq               /* scan the memory range */
2771 2772  .scanm_done:
2772 2773          rep;    ret     /* use 2 byte return instruction when branch target */
2773 2774                          /* AMD Software Optimization Guide - Section 6.2 */
2774 2775          SET_SIZE(scan_memory)
2775 2776  
2776 2777  #elif defined(__i386)
2777 2778  
2778 2779          ENTRY(scan_memory)
2779 2780          pushl   %ecx
2780 2781          pushl   %esi
2781 2782          movl    16(%esp), %ecx  /* move 2nd arg into rep control register */
2782 2783          shrl    $2, %ecx        /* convert from byte count to word count */
2783 2784          jz      .scanm_done
2784 2785          movl    12(%esp), %esi  /* move 1st arg into lodsw control register */
2785 2786          .byte   0xf3            /* rep prefix.  lame assembler.  sigh. */
2786 2787          lodsl
2787 2788  .scanm_done:
2788 2789          popl    %esi
2789 2790          popl    %ecx
2790 2791          ret
2791 2792          SET_SIZE(scan_memory)
2792 2793  
2793 2794  #endif  /* __i386 */
2794 2795  #endif  /* __lint */
2795 2796  
2796 2797  
2797 2798  #if defined(__lint)
2798 2799  
2799 2800  /*ARGSUSED */
2800 2801  int
2801 2802  lowbit(ulong_t i)
2802 2803  { return (0); }
2803 2804  
2804 2805  #else   /* __lint */
2805 2806  
2806 2807  #if defined(__amd64)
2807 2808  
2808 2809          ENTRY(lowbit)
2809 2810          movl    $-1, %eax       /* -1 + 1 == 0 when no bit is set */
2810 2811          bsfq    %rdi, %rdi      /* index of lowest set bit; ZF if 0 */
2811 2812          cmovnz  %edi, %eax      /* take the index unless input was 0 */
2812 2813          incl    %eax            /* convert to 1-based bit position */
2813 2814          ret
2814 2815          SET_SIZE(lowbit)
2815 2816  
2816 2817  #elif defined(__i386)
2817 2818  
2818 2819          ENTRY(lowbit)
2819 2820          bsfl    4(%esp), %eax
2820 2821          jz      0f
2821 2822          incl    %eax
2822 2823          ret
2823 2824  0:
2824 2825          xorl    %eax, %eax
2825 2826          ret
2826 2827          SET_SIZE(lowbit)
2827 2828  
2828 2829  #endif  /* __i386 */
2829 2830  #endif  /* __lint */
2830 2831  
2831 2832  #if defined(__lint)
2832 2833  
2833 2834  /*ARGSUSED*/
2834 2835  int
2835 2836  highbit(ulong_t i)
2836 2837  { return (0); }
2837 2838  
2838 2839  /*ARGSUSED*/
2839 2840  int
2840 2841  highbit64(uint64_t i)
2841 2842  { return (0); }
2842 2843  
2843 2844  #else   /* __lint */
2844 2845  
2845 2846  #if defined(__amd64)
2846 2847  
2847 2848          ENTRY(highbit)
2848 2849          ALTENTRY(highbit64)
2849 2850          movl    $-1, %eax       /* -1 + 1 == 0 when no bit is set */
2850 2851          bsrq    %rdi, %rdi      /* index of highest set bit; ZF if 0 */
2851 2852          cmovnz  %edi, %eax      /* take the index unless input was 0 */
2852 2853          incl    %eax            /* convert to 1-based bit position */
2853 2854          ret
2854 2855          SET_SIZE(highbit64)
2855 2856          SET_SIZE(highbit)
2856 2857  
2857 2858  #elif defined(__i386)
2858 2859  
2859 2860          ENTRY(highbit)
2860 2861          bsrl    4(%esp), %eax
2861 2862          jz      0f
2862 2863          incl    %eax
2863 2864          ret
2864 2865  0:
2865 2866          xorl    %eax, %eax
2866 2867          ret    
2867 2868          SET_SIZE(highbit)
2868 2869  
2869 2870          ENTRY(highbit64)
2870 2871          bsrl    8(%esp), %eax
2871 2872          jz      highbit
2872 2873          addl    $33, %eax
2873 2874          ret
2874 2875          SET_SIZE(highbit64)
2875 2876  
2876 2877  #endif  /* __i386 */
2877 2878  #endif  /* __lint */
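Both routines return 1-based bit positions and reserve 0 for "no bits set", which the movl $-1/cmovnz/incl idiom encodes without a branch. A hedged reference sketch with example values:

	#include <sys/types.h>

	/* highbit(): 1-based position of the most significant set bit. */
	static int
	highbit_ref(ulong_t i)
	{
		int h = 0;

		while (i != 0) {
			i >>= 1;
			h++;
		}
		return (h);	/* highbit(0) == 0, highbit(1) == 1 */
	}

	/* For i == 0x18: lowbit(i) == 4, highbit(i) == 5. */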
2878 2879  
2879 2880  #if defined(__lint)
2880 2881  
2881 2882  /*ARGSUSED*/
2882 2883  uint64_t
2883 2884  rdmsr(uint_t r)
2884 2885  { return (0); }
2885 2886  
2886 2887  /*ARGSUSED*/
2887 2888  void
2888 2889  wrmsr(uint_t r, const uint64_t val)
2889 2890  {}
2890 2891  
2891 2892  /*ARGSUSED*/
2892 2893  uint64_t
2893 2894  xrdmsr(uint_t r)
2894 2895  { return (0); }
2895 2896  
2896 2897  /*ARGSUSED*/
2897 2898  void
2898 2899  xwrmsr(uint_t r, const uint64_t val)
2899 2900  {}
2900 2901  
2901 2902  void
2902 2903  invalidate_cache(void)
2903 2904  {}
2904 2905  
2905 2906  /*ARGSUSED*/
2906 2907  uint64_t
2907 2908  get_xcr(uint_t r)
2908 2909  { return (0); }
2909 2910  
2910 2911  /*ARGSUSED*/
2911 2912  void
2912 2913  set_xcr(uint_t r, const uint64_t val)
2913 2914  {}
2914 2915  
2915 2916  #else  /* __lint */
2916 2917  
2917 2918  #define XMSR_ACCESS_VAL         $0x9c5a203a
2918 2919  
2919 2920  #if defined(__amd64)
2920 2921          
2921 2922          ENTRY(rdmsr)
2922 2923          movl    %edi, %ecx
2923 2924          rdmsr
2924 2925          shlq    $32, %rdx
2925 2926          orq     %rdx, %rax
2926 2927          ret
2927 2928          SET_SIZE(rdmsr)
2928 2929  
2929 2930          ENTRY(wrmsr)
2930 2931          movq    %rsi, %rdx
2931 2932          shrq    $32, %rdx
2932 2933          movl    %esi, %eax
2933 2934          movl    %edi, %ecx
2934 2935          wrmsr
2935 2936          ret
2936 2937          SET_SIZE(wrmsr)
2937 2938  
2938 2939          ENTRY(xrdmsr)
2939 2940          pushq   %rbp
2940 2941          movq    %rsp, %rbp
2941 2942          movl    %edi, %ecx
2942 2943          movl    XMSR_ACCESS_VAL, %edi   /* this value is needed to access MSR */
2943 2944          rdmsr
2944 2945          shlq    $32, %rdx
2945 2946          orq     %rdx, %rax
2946 2947          leave
2947 2948          ret
2948 2949          SET_SIZE(xrdmsr)
2949 2950  
2950 2951          ENTRY(xwrmsr)
2951 2952          pushq   %rbp
2952 2953          movq    %rsp, %rbp
2953 2954          movl    %edi, %ecx
2954 2955          movl    XMSR_ACCESS_VAL, %edi   /* this value is needed to access MSR */
2955 2956          movq    %rsi, %rdx
2956 2957          shrq    $32, %rdx
2957 2958          movl    %esi, %eax
2958 2959          wrmsr
2959 2960          leave
2960 2961          ret
2961 2962          SET_SIZE(xwrmsr)
2962 2963  
2963 2964          ENTRY(get_xcr)
2964 2965          movl    %edi, %ecx
2965 2966          #xgetbv
2966 2967          .byte   0x0f,0x01,0xd0
2967 2968          shlq    $32, %rdx
2968 2969          orq     %rdx, %rax
2969 2970          ret
2970 2971          SET_SIZE(get_xcr)
2971 2972  
2972 2973          ENTRY(set_xcr)
2973 2974          movq    %rsi, %rdx
2974 2975          shrq    $32, %rdx
2975 2976          movl    %esi, %eax
2976 2977          movl    %edi, %ecx
2977 2978          #xsetbv
2978 2979          .byte   0x0f,0x01,0xd1
2979 2980          ret
2980 2981          SET_SIZE(set_xcr)
2981 2982  
2982 2983  #elif defined(__i386)
2983 2984  
2984 2985          ENTRY(rdmsr)
2985 2986          movl    4(%esp), %ecx
2986 2987          rdmsr
2987 2988          ret
2988 2989          SET_SIZE(rdmsr)
2989 2990  
2990 2991          ENTRY(wrmsr)
2991 2992          movl    4(%esp), %ecx
2992 2993          movl    8(%esp), %eax
2993 2994          movl    12(%esp), %edx 
2994 2995          wrmsr
2995 2996          ret
2996 2997          SET_SIZE(wrmsr)
2997 2998  
2998 2999          ENTRY(xrdmsr)
2999 3000          pushl   %ebp
3000 3001          movl    %esp, %ebp
3001 3002          movl    8(%esp), %ecx
3002 3003          pushl   %edi
3003 3004          movl    XMSR_ACCESS_VAL, %edi   /* this value is needed to access MSR */
3004 3005          rdmsr
3005 3006          popl    %edi
3006 3007          leave
3007 3008          ret
3008 3009          SET_SIZE(xrdmsr)
3009 3010  
3010 3011          ENTRY(xwrmsr)
3011 3012          pushl   %ebp
3012 3013          movl    %esp, %ebp
3013 3014          movl    8(%esp), %ecx
3014 3015          movl    12(%esp), %eax
3015 3016          movl    16(%esp), %edx 
3016 3017          pushl   %edi
3017 3018          movl    XMSR_ACCESS_VAL, %edi   /* this value is needed to access MSR */
3018 3019          wrmsr
3019 3020          popl    %edi
3020 3021          leave
3021 3022          ret
3022 3023          SET_SIZE(xwrmsr)
3023 3024  
3024 3025          ENTRY(get_xcr)
3025 3026          movl    4(%esp), %ecx
3026 3027          #xgetbv
3027 3028          .byte   0x0f,0x01,0xd0
3028 3029          ret
3029 3030          SET_SIZE(get_xcr)
3030 3031  
3031 3032          ENTRY(set_xcr)
3032 3033          movl    4(%esp), %ecx
3033 3034          movl    8(%esp), %eax
3034 3035          movl    12(%esp), %edx
3035 3036          #xsetbv
3036 3037          .byte   0x0f,0x01,0xd1
3037 3038          ret
3038 3039          SET_SIZE(set_xcr)
3039 3040  
3040 3041  #endif  /* __i386 */
3041 3042  
3042 3043          ENTRY(invalidate_cache)
3043 3044          wbinvd
3044 3045          ret
3045 3046          SET_SIZE(invalidate_cache)
3046 3047  
3047 3048  #endif  /* __lint */
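The xgetbv/xsetbv opcodes are emitted as raw bytes (with the mnemonic in a comment), presumably for assemblers that predate them. A hedged usage sketch; the XCR0 bit layout follows the Intel SDM and the check itself is illustrative:

	#include <sys/types.h>

	extern uint64_t get_xcr(uint_t r);

	static int
	avx_state_enabled(void)
	{
		uint64_t xcr0 = get_xcr(0);	/* XCR0: enabled XSAVE states */

		/* bit 1 = SSE (XMM) state, bit 2 = AVX (YMM) state */
		return ((xcr0 & 0x6) == 0x6);
	}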
3048 3049  
3049 3050  #if defined(__lint)
3050 3051  
3051 3052  /*ARGSUSED*/
3052 3053  void
3053 3054  getcregs(struct cregs *crp)
3054 3055  {}
3055 3056  
3056 3057  #else   /* __lint */
3057 3058  
3058 3059  #if defined(__amd64)
3059 3060  
3060 3061          ENTRY_NP(getcregs)
3061 3062  #if defined(__xpv)
3062 3063          /*
3063 3064           * Only a few of the hardware control registers or descriptor tables
3064 3065           * are directly accessible to us, so just zero the structure.
3065 3066           *
3066 3067           * XXPV Perhaps it would be helpful for the hypervisor to return
3067 3068           *      virtualized versions of these for post-mortem use.
3068 3069           *      (Need to reevaluate - perhaps it already does!)
3069 3070           */
3070 3071          pushq   %rdi            /* save *crp */
3071 3072          movq    $CREGSZ, %rsi
3072 3073          call    bzero
3073 3074          popq    %rdi
3074 3075  
3075 3076          /*
3076 3077           * Dump what limited information we can
3077 3078           */
3078 3079          movq    %cr0, %rax
3079 3080          movq    %rax, CREG_CR0(%rdi)    /* cr0 */
3080 3081          movq    %cr2, %rax
3081 3082          movq    %rax, CREG_CR2(%rdi)    /* cr2 */
3082 3083          movq    %cr3, %rax
3083 3084          movq    %rax, CREG_CR3(%rdi)    /* cr3 */
3084 3085          movq    %cr4, %rax
3085 3086          movq    %rax, CREG_CR4(%rdi)    /* cr4 */
3086 3087  
3087 3088  #else   /* __xpv */
3088 3089  
3089 3090  #define GETMSR(r, off, d)       \
3090 3091          movl    $r, %ecx;       \
3091 3092          rdmsr;                  \
3092 3093          movl    %eax, off(d);   \
3093 3094          movl    %edx, off+4(d)
3094 3095  
3095 3096          xorl    %eax, %eax
3096 3097          movq    %rax, CREG_GDT+8(%rdi)
3097 3098          sgdt    CREG_GDT(%rdi)          /* 10 bytes */
3098 3099          movq    %rax, CREG_IDT+8(%rdi)
3099 3100          sidt    CREG_IDT(%rdi)          /* 10 bytes */
3100 3101          movq    %rax, CREG_LDT(%rdi)
3101 3102          sldt    CREG_LDT(%rdi)          /* 2 bytes */
3102 3103          movq    %rax, CREG_TASKR(%rdi)
3103 3104          str     CREG_TASKR(%rdi)        /* 2 bytes */
3104 3105          movq    %cr0, %rax
3105 3106          movq    %rax, CREG_CR0(%rdi)    /* cr0 */
3106 3107          movq    %cr2, %rax
3107 3108          movq    %rax, CREG_CR2(%rdi)    /* cr2 */
3108 3109          movq    %cr3, %rax
3109 3110          movq    %rax, CREG_CR3(%rdi)    /* cr3 */
3110 3111          movq    %cr4, %rax
3111 3112          movq    %rax, CREG_CR4(%rdi)    /* cr4 */
3112 3113          movq    %cr8, %rax
3113 3114          movq    %rax, CREG_CR8(%rdi)    /* cr8 */
3114 3115          GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
3115 3116          GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
3116 3117  #endif  /* __xpv */
3117 3118          ret
3118 3119          SET_SIZE(getcregs)
3119 3120  
3120 3121  #undef GETMSR
3121 3122  
3122 3123  #elif defined(__i386)
3123 3124  
3124 3125          ENTRY_NP(getcregs)
3125 3126  #if defined(__xpv)
3126 3127          /*
3127 3128           * Only a few of the hardware control registers or descriptor tables
3128 3129           * are directly accessible to us, so just zero the structure.
3129 3130           *
3130 3131           * XXPV Perhaps it would be helpful for the hypervisor to return
3131 3132           *      virtualized versions of these for post-mortem use.
3132 3133           *      (Need to reevaluate - perhaps it already does!)
3133 3134           */
3134 3135          movl    4(%esp), %edx
3135 3136          pushl   $CREGSZ
3136 3137          pushl   %edx
3137 3138          call    bzero
3138 3139          addl    $8, %esp
3139 3140          movl    4(%esp), %edx
3140 3141  
3141 3142          /*
3142 3143           * Dump what limited information we can
3143 3144           */
3144 3145          movl    %cr0, %eax
3145 3146          movl    %eax, CREG_CR0(%edx)    /* cr0 */
3146 3147          movl    %cr2, %eax
3147 3148          movl    %eax, CREG_CR2(%edx)    /* cr2 */
3148 3149          movl    %cr3, %eax
3149 3150          movl    %eax, CREG_CR3(%edx)    /* cr3 */
3150 3151          movl    %cr4, %eax
3151 3152          movl    %eax, CREG_CR4(%edx)    /* cr4 */
3152 3153  
3153 3154  #else   /* __xpv */
3154 3155  
3155 3156          movl    4(%esp), %edx
3156 3157          movw    $0, CREG_GDT+6(%edx)
3157 3158          movw    $0, CREG_IDT+6(%edx)
3158 3159          sgdt    CREG_GDT(%edx)          /* gdt */
3159 3160          sidt    CREG_IDT(%edx)          /* idt */
3160 3161          sldt    CREG_LDT(%edx)          /* ldt */
3161 3162          str     CREG_TASKR(%edx)        /* task */
3162 3163          movl    %cr0, %eax
3163 3164          movl    %eax, CREG_CR0(%edx)    /* cr0 */
3164 3165          movl    %cr2, %eax
3165 3166          movl    %eax, CREG_CR2(%edx)    /* cr2 */
3166 3167          movl    %cr3, %eax
3167 3168          movl    %eax, CREG_CR3(%edx)    /* cr3 */
3168 3169          bt      $X86FSET_LARGEPAGE, x86_featureset
3169 3170          jnc     .nocr4
3170 3171          movl    %cr4, %eax
3171 3172          movl    %eax, CREG_CR4(%edx)    /* cr4 */
3172 3173          jmp     .skip
3173 3174  .nocr4:
3174 3175          movl    $0, CREG_CR4(%edx)
3175 3176  .skip:
3176 3177  #endif
3177 3178          ret
3178 3179          SET_SIZE(getcregs)
3179 3180  
3180 3181  #endif  /* __i386 */
3181 3182  #endif  /* __lint */
3182 3183  
3183 3184  
3184 3185  /*
3185 3186   * A panic trigger is a word which is updated atomically and can only be set
3186 3187   * once.  We atomically store 0xDEFACEDD and load the old value.  If the
3187 3188   * previous value was 0, we succeed and return 1; otherwise return 0.
3188 3189   * This allows a partially corrupt trigger to still trigger correctly.  DTrace
3189 3190   * has its own version of this function to allow it to panic correctly from
3190 3191   * probe context.
3191 3192   */
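In C terms the trigger is a single atomic exchange; a model of the lock/xchgl sequences below (using the GCC builtin, not a replacement for the assembly):

	/* Returns 1 for the first caller, 0 for every later one. */
	static int
	panic_trigger_model(volatile int *tp)
	{
		int old = __atomic_exchange_n(tp, (int)0xdefacedd,
		    __ATOMIC_SEQ_CST);

		return (old == 0);
	}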
3192 3193  #if defined(__lint)
3193 3194  
3194 3195  /*ARGSUSED*/
3195 3196  int
3196 3197  panic_trigger(int *tp)
3197 3198  { return (0); }
3198 3199  
3199 3200  /*ARGSUSED*/
3200 3201  int
3201 3202  dtrace_panic_trigger(int *tp)
3202 3203  { return (0); }
3203 3204  
3204 3205  #else   /* __lint */
3205 3206  
3206 3207  #if defined(__amd64)
3207 3208  
3208 3209          ENTRY_NP(panic_trigger)
3209 3210          xorl    %eax, %eax
3210 3211          movl    $0xdefacedd, %edx
3211 3212          lock
3212 3213            xchgl %edx, (%rdi)
3213 3214          cmpl    $0, %edx
3214 3215          je      0f 
3215 3216          movl    $0, %eax
3216 3217          ret
3217 3218  0:      movl    $1, %eax
3218 3219          ret
3219 3220          SET_SIZE(panic_trigger)
3220 3221          
3221 3222          ENTRY_NP(dtrace_panic_trigger)
3222 3223          xorl    %eax, %eax
3223 3224          movl    $0xdefacedd, %edx
3224 3225          lock
3225 3226            xchgl %edx, (%rdi)
3226 3227          cmpl    $0, %edx
3227 3228          je      0f
3228 3229          movl    $0, %eax
3229 3230          ret
3230 3231  0:      movl    $1, %eax
3231 3232          ret
3232 3233          SET_SIZE(dtrace_panic_trigger)
3233 3234  
3234 3235  #elif defined(__i386)
3235 3236  
3236 3237          ENTRY_NP(panic_trigger)
3237 3238          movl    4(%esp), %edx           / %edx = address of trigger
3238 3239          movl    $0xdefacedd, %eax       / %eax = 0xdefacedd
3239 3240          lock                            / assert lock
3240 3241          xchgl %eax, (%edx)              / exchange %eax and the trigger
3241 3242          cmpl    $0, %eax                / if (%eax == 0x0)
3242 3243          je      0f                      /   return (1);
3243 3244          movl    $0, %eax                / else
3244 3245          ret                             /   return (0);
3245 3246  0:      movl    $1, %eax
3246 3247          ret
3247 3248          SET_SIZE(panic_trigger)
3248 3249  
3249 3250          ENTRY_NP(dtrace_panic_trigger)
3250 3251          movl    4(%esp), %edx           / %edx = address of trigger
3251 3252          movl    $0xdefacedd, %eax       / %eax = 0xdefacedd
3252 3253          lock                            / assert lock
3253 3254          xchgl %eax, (%edx)              / exchange %eax and the trigger
3254 3255          cmpl    $0, %eax                / if (%eax == 0x0)
3255 3256          je      0f                      /   return (1);
3256 3257          movl    $0, %eax                / else
3257 3258          ret                             /   return (0);
3258 3259  0:      movl    $1, %eax
3259 3260          ret
3260 3261          SET_SIZE(dtrace_panic_trigger)
3261 3262  
3262 3263  #endif  /* __i386 */
3263 3264  #endif  /* __lint */
3264 3265  
3265 3266  /*
3266 3267   * The panic() and cmn_err() functions invoke vpanic() as a common entry point
3267 3268   * into the panic code implemented in panicsys().  vpanic() is responsible
3268 3269   * for passing through the format string and arguments, and constructing a
3269 3270   * regs structure on the stack into which it saves the current register
3270 3271   * values.  If we are not dying due to a fatal trap, these registers will
3271 3272   * then be preserved in panicbuf as the current processor state.  Before
3272 3273   * invoking panicsys(), vpanic() activates the first panic trigger (see
3273 3274   * common/os/panic.c) and switches to the panic_stack if successful.  Note that
3274 3275   * DTrace takes a slightly different panic path if it must panic from probe
3275 3276   * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
3276 3277   * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
3277 3278   * branches back into vpanic().
3278 3279   */
3279 3280  #if defined(__lint)
3280 3281  
3281 3282  /*ARGSUSED*/
3282 3283  void
3283 3284  vpanic(const char *format, va_list alist)
3284 3285  {}
3285 3286  
3286 3287  /*ARGSUSED*/
3287 3288  void
3288 3289  dtrace_vpanic(const char *format, va_list alist)
3289 3290  {}
3290 3291  
3291 3292  #else   /* __lint */
3292 3293  
3293 3294  #if defined(__amd64)
3294 3295  
3295 3296          ENTRY_NP(vpanic)                        /* Initial stack layout: */
3296 3297          
3297 3298          pushq   %rbp                            /* | %rip |     0x60    */
3298 3299          movq    %rsp, %rbp                      /* | %rbp |     0x58    */
3299 3300          pushfq                                  /* | rfl  |     0x50    */
3300 3301          pushq   %r11                            /* | %r11 |     0x48    */
3301 3302          pushq   %r10                            /* | %r10 |     0x40    */
3302 3303          pushq   %rbx                            /* | %rbx |     0x38    */
3303 3304          pushq   %rax                            /* | %rax |     0x30    */
3304 3305          pushq   %r9                             /* | %r9  |     0x28    */
3305 3306          pushq   %r8                             /* | %r8  |     0x20    */
3306 3307          pushq   %rcx                            /* | %rcx |     0x18    */
3307 3308          pushq   %rdx                            /* | %rdx |     0x10    */
3308 3309          pushq   %rsi                            /* | %rsi |     0x8 alist */
3309 3310          pushq   %rdi                            /* | %rdi |     0x0 format */
3310 3311  
3311 3312          movq    %rsp, %rbx                      /* %rbx = current %rsp */
3312 3313  
3313 3314          leaq    panic_quiesce(%rip), %rdi       /* %rdi = &panic_quiesce */
3314 3315          call    panic_trigger                   /* %eax = panic_trigger() */
3315 3316  
3316 3317  vpanic_common:
3317 3318          /*
3318 3319           * The panic_trigger result is in %eax from the call above, and
3319 3320           * dtrace_panic places it in %eax before branching here.
3320 3321           * Code below may clobber %eax, so we stash the
3321 3322           * panic_trigger result in %r11d.
3322 3323           */
3323 3324          movl    %eax, %r11d
3324 3325          cmpl    $0, %r11d
3325 3326          je      0f
3326 3327  
3327 3328          /*
3328 3329           * If panic_trigger() was successful, we are the first to initiate a
3329 3330           * panic: we now switch to the reserved panic_stack before continuing.
3330 3331           */
3331 3332          leaq    panic_stack(%rip), %rsp
3332 3333          addq    $PANICSTKSIZE, %rsp
3333 3334  0:      subq    $REGSIZE, %rsp
3334 3335          /*
3335 3336           * Now that we've got everything set up, store the register values as
3336 3337           * they were when we entered vpanic() to the designated location in
3337 3338           * the regs structure we allocated on the stack.
3338 3339           */
3339 3340          movq    0x0(%rbx), %rcx
3340 3341          movq    %rcx, REGOFF_RDI(%rsp)
3341 3342          movq    0x8(%rbx), %rcx
3342 3343          movq    %rcx, REGOFF_RSI(%rsp)
3343 3344          movq    0x10(%rbx), %rcx
3344 3345          movq    %rcx, REGOFF_RDX(%rsp)
3345 3346          movq    0x18(%rbx), %rcx
3346 3347          movq    %rcx, REGOFF_RCX(%rsp)
3347 3348          movq    0x20(%rbx), %rcx
3348 3349  
3349 3350          movq    %rcx, REGOFF_R8(%rsp)
3350 3351          movq    0x28(%rbx), %rcx
3351 3352          movq    %rcx, REGOFF_R9(%rsp)
3352 3353          movq    0x30(%rbx), %rcx
3353 3354          movq    %rcx, REGOFF_RAX(%rsp)
3354 3355          movq    0x38(%rbx), %rcx
3355 3356          movq    %rcx, REGOFF_RBX(%rsp)
3356 3357          movq    0x58(%rbx), %rcx
3357 3358  
3358 3359          movq    %rcx, REGOFF_RBP(%rsp)
3359 3360          movq    0x40(%rbx), %rcx
3360 3361          movq    %rcx, REGOFF_R10(%rsp)
3361 3362          movq    0x48(%rbx), %rcx
3362 3363          movq    %rcx, REGOFF_R11(%rsp)
3363 3364          movq    %r12, REGOFF_R12(%rsp)
3364 3365  
3365 3366          movq    %r13, REGOFF_R13(%rsp)
3366 3367          movq    %r14, REGOFF_R14(%rsp)
3367 3368          movq    %r15, REGOFF_R15(%rsp)
3368 3369  
3369 3370          xorl    %ecx, %ecx
3370 3371          movw    %ds, %cx
3371 3372          movq    %rcx, REGOFF_DS(%rsp)
3372 3373          movw    %es, %cx
3373 3374          movq    %rcx, REGOFF_ES(%rsp)
3374 3375          movw    %fs, %cx
3375 3376          movq    %rcx, REGOFF_FS(%rsp)
3376 3377          movw    %gs, %cx
3377 3378          movq    %rcx, REGOFF_GS(%rsp)
3378 3379  
3379 3380          movq    $0, REGOFF_TRAPNO(%rsp)
3380 3381  
3381 3382          movq    $0, REGOFF_ERR(%rsp)
3382 3383          leaq    vpanic(%rip), %rcx
3383 3384          movq    %rcx, REGOFF_RIP(%rsp)
3384 3385          movw    %cs, %cx
3385 3386          movzwq  %cx, %rcx
3386 3387          movq    %rcx, REGOFF_CS(%rsp)
3387 3388          movq    0x50(%rbx), %rcx
3388 3389          movq    %rcx, REGOFF_RFL(%rsp)
3389 3390          movq    %rbx, %rcx
3390 3391          addq    $0x60, %rcx
3391 3392          movq    %rcx, REGOFF_RSP(%rsp)
3392 3393          movw    %ss, %cx
3393 3394          movzwq  %cx, %rcx
3394 3395          movq    %rcx, REGOFF_SS(%rsp)
3395 3396  
3396 3397          /*
3397 3398           * panicsys(format, alist, rp, on_panic_stack) 
3398 3399           */     
3399 3400          movq    REGOFF_RDI(%rsp), %rdi          /* format */
3400 3401          movq    REGOFF_RSI(%rsp), %rsi          /* alist */
3401 3402          movq    %rsp, %rdx                      /* struct regs */
3402 3403          movl    %r11d, %ecx                     /* on_panic_stack */
3403 3404          call    panicsys
3404 3405          addq    $REGSIZE, %rsp
3405 3406          popq    %rdi
3406 3407          popq    %rsi
3407 3408          popq    %rdx
3408 3409          popq    %rcx
3409 3410          popq    %r8
3410 3411          popq    %r9
3411 3412          popq    %rax
3412 3413          popq    %rbx
3413 3414          popq    %r10
3414 3415          popq    %r11
3415 3416          popfq
3416 3417          leave
3417 3418          ret
3418 3419          SET_SIZE(vpanic)
3419 3420  
3420 3421          ENTRY_NP(dtrace_vpanic)                 /* Initial stack layout: */
3421 3422  
3422 3423          pushq   %rbp                            /* | %rip |     0x60    */
3423 3424          movq    %rsp, %rbp                      /* | %rbp |     0x58    */
3424 3425          pushfq                                  /* | rfl  |     0x50    */
3425 3426          pushq   %r11                            /* | %r11 |     0x48    */
3426 3427          pushq   %r10                            /* | %r10 |     0x40    */
3427 3428          pushq   %rbx                            /* | %rbx |     0x38    */
3428 3429          pushq   %rax                            /* | %rax |     0x30    */
3429 3430          pushq   %r9                             /* | %r9  |     0x28    */
3430 3431          pushq   %r8                             /* | %r8  |     0x20    */
3431 3432          pushq   %rcx                            /* | %rcx |     0x18    */
3432 3433          pushq   %rdx                            /* | %rdx |     0x10    */
3433 3434          pushq   %rsi                            /* | %rsi |     0x8 alist */
3434 3435          pushq   %rdi                            /* | %rdi |     0x0 format */
3435 3436  
3436 3437          movq    %rsp, %rbx                      /* %rbx = current %rsp */
3437 3438  
3438 3439          leaq    panic_quiesce(%rip), %rdi       /* %rdi = &panic_quiesce */
3439 3440          call    dtrace_panic_trigger    /* %eax = dtrace_panic_trigger() */
3440 3441          jmp     vpanic_common
3441 3442  
3442 3443          SET_SIZE(dtrace_vpanic)
3443 3444  
3444 3445  #elif defined(__i386)
3445 3446  
3446 3447          ENTRY_NP(vpanic)                        / Initial stack layout:
3447 3448  
3448 3449          pushl   %ebp                            / | %eip | 20
3449 3450          movl    %esp, %ebp                      / | %ebp | 16
3450 3451          pushl   %eax                            / | %eax | 12
3451 3452          pushl   %ebx                            / | %ebx |  8
3452 3453          pushl   %ecx                            / | %ecx |  4
3453 3454          pushl   %edx                            / | %edx |  0
3454 3455  
3455 3456          movl    %esp, %ebx                      / %ebx = current stack pointer
3456 3457  
3457 3458          lea     panic_quiesce, %eax             / %eax = &panic_quiesce
3458 3459          pushl   %eax                            / push &panic_quiesce
3459 3460          call    panic_trigger                   / %eax = panic_trigger()
3460 3461          addl    $4, %esp                        / reset stack pointer
3461 3462  
3462 3463  vpanic_common:
3463 3464          cmpl    $0, %eax                        / if (%eax == 0)
3464 3465          je      0f                              /   goto 0f;
3465 3466  
3466 3467          /*
3467 3468           * If panic_trigger() was successful, we are the first to initiate a
3468 3469           * panic: we now switch to the reserved panic_stack before continuing.
3469 3470           */
3470 3471          lea     panic_stack, %esp               / %esp  = panic_stack
3471 3472          addl    $PANICSTKSIZE, %esp             / %esp += PANICSTKSIZE
3472 3473  
3473 3474  0:      subl    $REGSIZE, %esp                  / allocate struct regs
3474 3475  
3475 3476          /*
3476 3477           * Now that we've got everything set up, store the register values as
3477 3478           * they were when we entered vpanic() to the designated location in
3478 3479           * the regs structure we allocated on the stack. 
3479 3480           */
3480 3481  #if !defined(__GNUC_AS__)
3481 3482          movw    %gs, %edx
3482 3483          movl    %edx, REGOFF_GS(%esp)
3483 3484          movw    %fs, %edx
3484 3485          movl    %edx, REGOFF_FS(%esp)
3485 3486          movw    %es, %edx
3486 3487          movl    %edx, REGOFF_ES(%esp)
3487 3488          movw    %ds, %edx
3488 3489          movl    %edx, REGOFF_DS(%esp)
3489 3490  #else   /* __GNUC_AS__ */
3490 3491          mov     %gs, %edx
3491 3492          mov     %edx, REGOFF_GS(%esp)
3492 3493          mov     %fs, %edx
3493 3494          mov     %edx, REGOFF_FS(%esp)
3494 3495          mov     %es, %edx
3495 3496          mov     %edx, REGOFF_ES(%esp)
3496 3497          mov     %ds, %edx
3497 3498          mov     %edx, REGOFF_DS(%esp)
3498 3499  #endif  /* __GNUC_AS__ */
3499 3500          movl    %edi, REGOFF_EDI(%esp)
3500 3501          movl    %esi, REGOFF_ESI(%esp)
3501 3502          movl    16(%ebx), %ecx
3502 3503          movl    %ecx, REGOFF_EBP(%esp)
3503 3504          movl    %ebx, %ecx
3504 3505          addl    $20, %ecx
3505 3506          movl    %ecx, REGOFF_ESP(%esp)
3506 3507          movl    8(%ebx), %ecx
3507 3508          movl    %ecx, REGOFF_EBX(%esp)
3508 3509          movl    0(%ebx), %ecx
3509 3510          movl    %ecx, REGOFF_EDX(%esp)
3510 3511          movl    4(%ebx), %ecx
3511 3512          movl    %ecx, REGOFF_ECX(%esp)
3512 3513          movl    12(%ebx), %ecx
3513 3514          movl    %ecx, REGOFF_EAX(%esp)
3514 3515          movl    $0, REGOFF_TRAPNO(%esp)
3515 3516          movl    $0, REGOFF_ERR(%esp)
3516 3517          lea     vpanic, %ecx
3517 3518          movl    %ecx, REGOFF_EIP(%esp)
3518 3519  #if !defined(__GNUC_AS__)
3519 3520          movw    %cs, %edx
3520 3521  #else   /* __GNUC_AS__ */
3521 3522          mov     %cs, %edx
3522 3523  #endif  /* __GNUC_AS__ */
3523 3524          movl    %edx, REGOFF_CS(%esp)
3524 3525          pushfl
3525 3526          popl    %ecx
3526 3527  #if defined(__xpv)
3527 3528          /*
3528 3529           * Synthesize the PS_IE bit from the event mask bit
3529 3530           */
3530 3531          CURTHREAD(%edx)
3531 3532          KPREEMPT_DISABLE(%edx)
3532 3533          EVENT_MASK_TO_IE(%edx, %ecx)
3533 3534          CURTHREAD(%edx)
3534 3535          KPREEMPT_ENABLE_NOKP(%edx)
3535 3536  #endif
3536 3537          movl    %ecx, REGOFF_EFL(%esp)
3537 3538          movl    $0, REGOFF_UESP(%esp)
3538 3539  #if !defined(__GNUC_AS__)
3539 3540          movw    %ss, %edx
3540 3541  #else   /* __GNUC_AS__ */
3541 3542          mov     %ss, %edx
3542 3543  #endif  /* __GNUC_AS__ */
3543 3544          movl    %edx, REGOFF_SS(%esp)
3544 3545  
3545 3546          movl    %esp, %ecx                      / %ecx = &regs
3546 3547          pushl   %eax                            / push on_panic_stack
3547 3548          pushl   %ecx                            / push &regs
3548 3549          movl    12(%ebp), %ecx                  / %ecx = alist
3549 3550          pushl   %ecx                            / push alist
3550 3551          movl    8(%ebp), %ecx                   / %ecx = format
3551 3552          pushl   %ecx                            / push format
3552 3553          call    panicsys                        / panicsys();
3553 3554          addl    $16, %esp                       / pop arguments
3554 3555  
3555 3556          addl    $REGSIZE, %esp
3556 3557          popl    %edx
3557 3558          popl    %ecx
3558 3559          popl    %ebx
3559 3560          popl    %eax
3560 3561          leave
3561 3562          ret
3562 3563          SET_SIZE(vpanic)
3563 3564  
3564 3565          ENTRY_NP(dtrace_vpanic)                 / Initial stack layout:
3565 3566  
3566 3567          pushl   %ebp                            / | %eip | 20
3567 3568          movl    %esp, %ebp                      / | %ebp | 16
3568 3569          pushl   %eax                            / | %eax | 12
3569 3570          pushl   %ebx                            / | %ebx |  8
3570 3571          pushl   %ecx                            / | %ecx |  4
3571 3572          pushl   %edx                            / | %edx |  0
3572 3573  
3573 3574          movl    %esp, %ebx                      / %ebx = current stack pointer
3574 3575  
3575 3576          lea     panic_quiesce, %eax             / %eax = &panic_quiesce
3576 3577          pushl   %eax                            / push &panic_quiesce
3577 3578          call    dtrace_panic_trigger            / %eax = dtrace_panic_trigger()
3578 3579          addl    $4, %esp                        / reset stack pointer
3579 3580          jmp     vpanic_common                   / jump back to common code
3580 3581  
3581 3582          SET_SIZE(dtrace_vpanic)
3582 3583  
  
3583 3584  #endif  /* __i386 */
3584 3585  #endif  /* __lint */
3585 3586  
3586 3587  #if defined(__lint)
3587 3588  
3588 3589  void
3589 3590  hres_tick(void)
3590 3591  {}
3591 3592  
3592 3593  int64_t timedelta;
3593      -hrtime_t hres_last_tick;
3594      -volatile timestruc_t hrestime;
3595      -int64_t hrestime_adj;
3596      -volatile int hres_lock;
3597 3594  hrtime_t hrtime_base;
3598 3595  
3599 3596  #else   /* __lint */
3600 3597  
3601      -        DGDEF3(hrestime, _MUL(2, CLONGSIZE), 8)
3602      -        .NWORD  0, 0
3603      -
3604      -        DGDEF3(hrestime_adj, 8, 8)
3605      -        .long   0, 0
3606      -
3607      -        DGDEF3(hres_last_tick, 8, 8)
3608      -        .long   0, 0
3609      -
3610 3598          DGDEF3(timedelta, 8, 8)
3611 3599          .long   0, 0
3612 3600  
3613      -        DGDEF3(hres_lock, 4, 8)
3614      -        .long   0
3615      -
3616 3601          /*
3617 3602           * initialized to a non zero value to make pc_gethrtime()
3618 3603           * work correctly even before clock is initialized
3619 3604           */
3620 3605          DGDEF3(hrtime_base, 8, 8)
3621 3606          .long   _MUL(NSEC_PER_CLOCK_TICK, 6), 0
3622 3607  
3623 3608          DGDEF3(adj_shift, 4, 4)
3624 3609          .long   ADJ_SHIFT
3625 3610  
3626 3611  #if defined(__amd64)
3627 3612  
3628 3613          ENTRY_NP(hres_tick)
3629 3614          pushq   %rbp
3630 3615          movq    %rsp, %rbp
3631 3616  
3632 3617          /*
3633 3618           * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3634 3619           * hres_last_tick can only be modified while holding CLOCK_LOCK).
3635 3620           * At worst, performing this now instead of under CLOCK_LOCK may
3636 3621           * introduce some jitter in pc_gethrestime().
3637 3622           */
3638 3623          call    *gethrtimef(%rip)
3639 3624          movq    %rax, %r8
3640 3625  
3641 3626          leaq    hres_lock(%rip), %rax
3642 3627          movb    $-1, %dl
3643 3628  .CL1:
3644 3629          xchgb   %dl, (%rax)
3645 3630          testb   %dl, %dl
3646 3631          jz      .CL3                    /* got it */
3647 3632  .CL2:
3648 3633          cmpb    $0, (%rax)              /* possible to get lock? */
3649 3634          pause
3650 3635          jne     .CL2
3651 3636          jmp     .CL1                    /* yes, try again */
3652 3637  .CL3:
3653 3638          /*
3654 3639           * compute the interval since last time hres_tick was called
3655 3640           * and adjust hrtime_base and hrestime accordingly
3656 3641           * hrtime_base is an 8 byte value (in nsec), hrestime is
3657 3642           * a timestruc_t (sec, nsec)
3658 3643           */
3659 3644          leaq    hres_last_tick(%rip), %rax
3660 3645          movq    %r8, %r11
3661 3646          subq    (%rax), %r8
3662 3647          addq    %r8, hrtime_base(%rip)  /* add interval to hrtime_base */
3663 3648          addq    %r8, hrestime+8(%rip)   /* add interval to hrestime.tv_nsec */
3664 3649          /*
3665 3650           * Now that we have CLOCK_LOCK, we can update hres_last_tick
3666 3651           */     
3667 3652          movq    %r11, (%rax)    
3668 3653  
3669 3654          call    __adj_hrestime
3670 3655  
3671 3656          /*
3672 3657           * release the hres_lock
3673 3658           */
3674 3659          incl    hres_lock(%rip)
3675 3660          leave
3676 3661          ret
3677 3662          SET_SIZE(hres_tick)
3678 3663          
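The .CL1/.CL2/.CL3 sequence above is a test-and-test-and-set spin on the low byte of hres_lock: .CL1 attempts the lock with an atomic byte exchange, .CL2 spins read-only (with a pause hint) until the lock looks free, then retries. The release, "incl hres_lock", clears the 0xff lock byte by carrying into the upper bits, which readers can treat as a version counter. A hedged C model of the acquire loop; the names below are illustrative stand-ins:

        static volatile unsigned char hres_lock_byte;

        static void
        clock_lock_model(void)
        {
                for (;;) {
                        /* .CL1: try to take the lock with an exchange */
                        if (__atomic_exchange_n(&hres_lock_byte, 0xff,
                            __ATOMIC_ACQUIRE) == 0)
                                return;                 /* .CL3: got it */
                        /* .CL2: spin without writing until it looks free */
                        while (hres_lock_byte != 0)
                                __builtin_ia32_pause();
                }
        }
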
3679 3664  #elif defined(__i386)
3680 3665  
3681 3666          ENTRY_NP(hres_tick)
3682 3667          pushl   %ebp
3683 3668          movl    %esp, %ebp
3684 3669          pushl   %esi
3685 3670          pushl   %ebx
3686 3671  
3687 3672          /*
3688 3673           * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3689 3674           * hres_last_tick can only be modified while holding CLOCK_LOCK).
3690 3675           * At worst, performing this now instead of under CLOCK_LOCK may
3691 3676           * introduce some jitter in pc_gethrestime().
3692 3677           */
3693 3678          call    *gethrtimef
3694 3679          movl    %eax, %ebx
3695 3680          movl    %edx, %esi
3696 3681  
3697 3682          movl    $hres_lock, %eax
3698 3683          movl    $-1, %edx
3699 3684  .CL1:
3700 3685          xchgb   %dl, (%eax)
3701 3686          testb   %dl, %dl
3702 3687          jz      .CL3                    / got it
3703 3688  .CL2:
3704 3689          cmpb    $0, (%eax)              / possible to get lock?
3705 3690          pause
3706 3691          jne     .CL2
3707 3692          jmp     .CL1                    / yes, try again
3708 3693  .CL3:
3709 3694          /*
3710 3695           * compute the interval since last time hres_tick was called
3711 3696           * and adjust hrtime_base and hrestime accordingly
3712 3697           * hrtime_base is an 8 byte value (in nsec), hrestime is
3713 3698           * timestruc_t (sec, nsec)
3714 3699           */
3715 3700  
3716 3701          lea     hres_last_tick, %eax
3717 3702  
3718 3703          movl    %ebx, %edx
3719 3704          movl    %esi, %ecx
3720 3705  
3721 3706          subl    (%eax), %edx
3722 3707          sbbl    4(%eax), %ecx
3723 3708  
3724 3709          addl    %edx, hrtime_base       / add interval to hrtime_base
3725 3710          adcl    %ecx, hrtime_base+4
3726 3711  
3727 3712          addl    %edx, hrestime+4        / add interval to hrestime.tv_nsec
3728 3713  
3729 3714          /
3730 3715          / Now that we have CLOCK_LOCK, we can update hres_last_tick.
3731 3716          /
3732 3717          movl    %ebx, (%eax)
3733 3718          movl    %esi,  4(%eax)
3734 3719  
3735 3720          / get hrestime at this moment. used as base for pc_gethrestime
3736 3721          /
3737 3722          / Apply adjustment, if any
3738 3723          /
3739 3724          / #define HRES_ADJ      (NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
3740 3725          / (max_hres_adj)
3741 3726          /
3742 3727          / void
3743 3728          / adj_hrestime()
3744 3729          / {
3745 3730          /       long long adj;
3746 3731          /
3747 3732          /       if (hrestime_adj == 0)
3748 3733          /               adj = 0;
3749 3734          /       else if (hrestime_adj > 0) {
3750 3735          /               if (hrestime_adj < HRES_ADJ)
3751 3736          /                       adj = hrestime_adj;
3752 3737          /               else
3753 3738          /                       adj = HRES_ADJ;
3754 3739          /       }
3755 3740          /       else {
3756 3741          /               if (hrestime_adj < -(HRES_ADJ))
3757 3742          /                       adj = -(HRES_ADJ);
3758 3743          /               else
3759 3744          /                       adj = hrestime_adj;
3760 3745          /       }
3761 3746          /
3762 3747          /       timedelta -= adj;
3763 3748          /       hrestime_adj = timedelta;
3764 3749          /       hrestime.tv_nsec += adj;
3765 3750          /
3766 3751          /       while (hrestime.tv_nsec >= NANOSEC) {
3767 3752          /               one_sec++;
3768 3753          /               hrestime.tv_sec++;
3769 3754          /               hrestime.tv_nsec -= NANOSEC;
3770 3755          /       }
3771 3756          / }
3772 3757  __adj_hrestime:
3773 3758          movl    hrestime_adj, %esi      / if (hrestime_adj == 0)
3774 3759          movl    hrestime_adj+4, %edx
3775 3760          andl    %esi, %esi
3776 3761          jne     .CL4                    / no
3777 3762          andl    %edx, %edx
3778 3763          jne     .CL4                    / no
3779 3764          subl    %ecx, %ecx              / yes, adj = 0;
3780 3765          subl    %edx, %edx
3781 3766          jmp     .CL5
3782 3767  .CL4:
3783 3768          subl    %ecx, %ecx
3784 3769          subl    %eax, %eax
3785 3770          subl    %esi, %ecx
3786 3771          sbbl    %edx, %eax
3787 3772          andl    %eax, %eax              / if (hrestime_adj > 0)
3788 3773          jge     .CL6
3789 3774  
3790 3775          / In the following comments, HRES_ADJ is used, while in the code
3791 3776          / max_hres_adj is used.
3792 3777          /
3793 3778          / The test for "hrestime_adj < HRES_ADJ" is complicated because
3794 3779          / hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
3795 3780          / on the logical equivalence of:
3796 3781          /
3797 3782          /       !(hrestime_adj < HRES_ADJ)
3798 3783          /
3799 3784          / and the two step sequence:
3800 3785          /
3801 3786          /       (HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
3802 3787          /
3803 3788          / which computes whether or not the least significant 32-bits
3804 3789          / of hrestime_adj is greater than HRES_ADJ, followed by:
3805 3790          /
3806 3791          /       Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
3807 3792          /
3808 3793          / which generates a carry whenever step 1 is true or the most
3809 3794          / significant long of the longlong hrestime_adj is non-zero.
3810 3795  
3811 3796          movl    max_hres_adj, %ecx      / hrestime_adj is positive
3812 3797          subl    %esi, %ecx
3813 3798          movl    %edx, %eax
3814 3799          adcl    $-1, %eax
3815 3800          jnc     .CL7
3816 3801          movl    max_hres_adj, %ecx      / adj = HRES_ADJ;
3817 3802          subl    %edx, %edx
3818 3803          jmp     .CL5
3819 3804  
3820 3805          / The following computation is similar to the one above.
3821 3806          /
3822 3807          / The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
3823 3808          / hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
3824 3809          / on the logical equivalence of:
3825 3810          /
3826 3811          /       (hrestime_adj > -HRES_ADJ)
3827 3812          /
3828 3813          / and the two step sequence:
3829 3814          /
3830 3815          /       (HRES_ADJ + lsw(hrestime_adj)) generates a Carry
3831 3816          /
3832 3817          / which means the least significant 32-bits of hrestime_adj is
3833 3818          / greater than -HRES_ADJ, followed by:
3834 3819          /
3835 3820          /       Previous Carry + 0 + msw(hrestime_adj) generates a Carry
3836 3821          /
3837 3822          / which generates a carry only when step 1 is true and the most
3838 3823          / significant long of the longlong hrestime_adj is -1.
3839 3824  
3840 3825  .CL6:                                   / hrestime_adj is negative
3841 3826          movl    %esi, %ecx
3842 3827          addl    max_hres_adj, %ecx
3843 3828          movl    %edx, %eax
3844 3829          adcl    $0, %eax
3845 3830          jc      .CL7
3846 3831          xor     %ecx, %ecx
3847 3832          subl    max_hres_adj, %ecx      / adj = -(HRES_ADJ);
3848 3833          movl    $-1, %edx
3849 3834          jmp     .CL5
3850 3835  .CL7:
3851 3836          movl    %esi, %ecx              / adj = hrestime_adj;
3852 3837  .CL5:
3853 3838          movl    timedelta, %esi
3854 3839          subl    %ecx, %esi
3855 3840          movl    timedelta+4, %eax
3856 3841          sbbl    %edx, %eax
3857 3842          movl    %esi, timedelta
3858 3843          movl    %eax, timedelta+4       / timedelta -= adj;
3859 3844          movl    %esi, hrestime_adj
3860 3845          movl    %eax, hrestime_adj+4    / hrestime_adj = timedelta;
3861 3846          addl    hrestime+4, %ecx
3862 3847  
3863 3848          movl    %ecx, %eax              / eax = tv_nsec
3864 3849  1:
3865 3850          cmpl    $NANOSEC, %eax          / if ((unsigned long)tv_nsec >= NANOSEC)
3866 3851          jb      .CL8                    / no
3867 3852          incl    one_sec                 / yes,  one_sec++;
3868 3853          incl    hrestime                / hrestime.tv_sec++;
3869 3854          addl    $-NANOSEC, %eax         / tv_nsec -= NANOSEC
3870 3855          jmp     1b                      / check for more seconds
3871 3856  
3872 3857  .CL8:
3873 3858          movl    %eax, hrestime+4        / store final into hrestime.tv_nsec
3874 3859          incl    hres_lock               / release the hres_lock
3875 3860  
3876 3861          popl    %ebx
3877 3862          popl    %esi
3878 3863          leave
3879 3864          ret
3880 3865          SET_SIZE(hres_tick)
3881 3866  
3882 3867  #endif  /* __i386 */
3883 3868  #endif  /* __lint */
3884 3869  
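The commented pseudocode inside the i386 hres_tick() above compiles nearly verbatim. For reference, a self-contained C rendition, with the globals and HRES_ADJ modeled by local stand-ins (every name below is illustrative):

        static long long timedelta_m, hrestime_adj_m;
        static long hrestime_sec_m, hrestime_nsec_m, one_sec_m;

        static void
        adj_hrestime_model(long long max_adj)   /* max_adj plays HRES_ADJ */
        {
                long long adj;

                if (hrestime_adj_m == 0)
                        adj = 0;
                else if (hrestime_adj_m > 0)
                        adj = (hrestime_adj_m < max_adj) ?
                            hrestime_adj_m : max_adj;
                else
                        adj = (hrestime_adj_m < -max_adj) ?
                            -max_adj : hrestime_adj_m;

                timedelta_m -= adj;
                hrestime_adj_m = timedelta_m;
                hrestime_nsec_m += adj;

                while (hrestime_nsec_m >= 1000000000L) {        /* NANOSEC */
                        one_sec_m++;
                        hrestime_sec_m++;
                        hrestime_nsec_m -= 1000000000L;
                }
        }
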
3885 3870  /*
3886 3871   * void prefetch_smap_w(void *)
3887 3872   *
3888 3873   * Prefetch ahead within a linear list of smap structures.
3889 3874   * Not implemented for ia32.  Stub for compatibility.
3890 3875   */
3891 3876  
3892 3877  #if defined(__lint)
3893 3878  
3894 3879  /*ARGSUSED*/
3895 3880  void prefetch_smap_w(void *smp)
3896 3881  {}
3897 3882  
3898 3883  #else   /* __lint */
3899 3884  
3900 3885          ENTRY(prefetch_smap_w)
3901 3886          rep;    ret     /* use 2 byte return instruction when branch target */
3902 3887                          /* AMD Software Optimization Guide - Section 6.2 */
3903 3888          SET_SIZE(prefetch_smap_w)
3904 3889  
3905 3890  #endif  /* __lint */
3906 3891  
3907 3892  /*
3908 3893   * prefetch_page_r(page_t *)
3909 3894   * issue prefetch instructions for a page_t
3910 3895   */
3911 3896  #if defined(__lint)
3912 3897  
3913 3898  /*ARGSUSED*/
3914 3899  void
3915 3900  prefetch_page_r(void *pp)
3916 3901  {}
3917 3902  
3918 3903  #else   /* __lint */
3919 3904  
3920 3905          ENTRY(prefetch_page_r)
3921 3906          rep;    ret     /* use 2 byte return instruction when branch target */
3922 3907                          /* AMD Software Optimization Guide - Section 6.2 */
3923 3908          SET_SIZE(prefetch_page_r)
3924 3909  
3925 3910  #endif  /* __lint */
3926 3911  
3927 3912  #if defined(__lint)
3928 3913  
3929 3914  /*ARGSUSED*/
3930 3915  int
3931 3916  bcmp(const void *s1, const void *s2, size_t count)
3932 3917  { return (0); }
3933 3918  
3934 3919  #else   /* __lint */
3935 3920  
3936 3921  #if defined(__amd64)
3937 3922  
3938 3923          ENTRY(bcmp)
3939 3924          pushq   %rbp
3940 3925          movq    %rsp, %rbp
3941 3926  #ifdef DEBUG
3942 3927          testq   %rdx,%rdx
3943 3928          je      1f
3944 3929          movq    postbootkernelbase(%rip), %r11
3945 3930          cmpq    %r11, %rdi
3946 3931          jb      0f
3947 3932          cmpq    %r11, %rsi
3948 3933          jnb     1f
3949 3934  0:      leaq    .bcmp_panic_msg(%rip), %rdi
3950 3935          xorl    %eax, %eax
3951 3936          call    panic
3952 3937  1:
3953 3938  #endif  /* DEBUG */
3954 3939          call    memcmp
3955 3940          testl   %eax, %eax
3956 3941          setne   %dl
3957 3942          leave
3958 3943          movzbl  %dl, %eax
3959 3944          ret
3960 3945          SET_SIZE(bcmp)
3961 3946          
3962 3947  #elif defined(__i386)
3963 3948          
3964 3949  #define ARG_S1          8
3965 3950  #define ARG_S2          12
3966 3951  #define ARG_LENGTH      16
3967 3952  
3968 3953          ENTRY(bcmp)
3969 3954          pushl   %ebp
3970 3955          movl    %esp, %ebp      / create new stack frame
3971 3956  #ifdef DEBUG
3972 3957          cmpl    $0, ARG_LENGTH(%ebp)
3973 3958          je      1f
3974 3959          movl    postbootkernelbase, %eax
3975 3960          cmpl    %eax, ARG_S1(%ebp)
3976 3961          jb      0f
3977 3962          cmpl    %eax, ARG_S2(%ebp)
3978 3963          jnb     1f
3979 3964  0:      pushl   $.bcmp_panic_msg
3980 3965          call    panic
3981 3966  1:
3982 3967  #endif  /* DEBUG */
3983 3968  
3984 3969          pushl   %edi            / save register variable
3985 3970          movl    ARG_S1(%ebp), %eax      / %eax = address of string 1
3986 3971          movl    ARG_S2(%ebp), %ecx      / %ecx = address of string 2
3987 3972          cmpl    %eax, %ecx      / if the same string
3988 3973          je      .equal          / goto .equal
3989 3974          movl    ARG_LENGTH(%ebp), %edi  / %edi = length in bytes
3990 3975          cmpl    $4, %edi        / if %edi < 4
3991 3976          jb      .byte_check     / goto .byte_check
3992 3977          .align  4
3993 3978  .word_loop:
3994 3979          movl    (%ecx), %edx    / move 1 word from (%ecx) to %edx
3995 3980          leal    -4(%edi), %edi  / %edi -= 4
3996 3981          cmpl    (%eax), %edx    / compare 1 word from (%eax) with %edx
3997 3982          jne     .word_not_equal / if not equal, goto .word_not_equal
3998 3983          leal    4(%ecx), %ecx   / %ecx += 4 (next word)
3999 3984          leal    4(%eax), %eax   / %eax += 4 (next word)
4000 3985          cmpl    $4, %edi        / if %edi >= 4
4001 3986          jae     .word_loop      / goto .word_loop
4002 3987  .byte_check:
4003 3988          cmpl    $0, %edi        / if %edi == 0
4004 3989          je      .equal          / goto .equal
4005 3990          jmp     .byte_loop      / goto .byte_loop (checks in bytes)
4006 3991  .word_not_equal:
4007 3992          leal    4(%edi), %edi   / %edi += 4 (post-decremented)
4008 3993          .align  4
4009 3994  .byte_loop:
4010 3995          movb    (%ecx), %dl     / move 1 byte from (%ecx) to %dl
4011 3996          cmpb    %dl, (%eax)     / compare %dl with 1 byte from (%eax)
4012 3997          jne     .not_equal      / if not equal, goto .not_equal
4013 3998          incl    %ecx            / %ecx++ (next byte)
4014 3999          incl    %eax            / %eax++ (next byte)
4015 4000          decl    %edi            / %edi--
4016 4001          jnz     .byte_loop      / if not zero, goto .byte_loop
4017 4002  .equal:
4018 4003          xorl    %eax, %eax      / %eax = 0
4019 4004          popl    %edi            / restore register variable
4020 4005          leave                   / restore old stack frame
4021 4006          ret                     / return (0)
4022 4007          .align  4
4023 4008  .not_equal:
4024 4009          movl    $1, %eax        / return 1
4025 4010          popl    %edi            / restore register variable
4026 4011          leave                   / restore old stack frame
4027 4012          ret                     / return (1)
4028 4013          SET_SIZE(bcmp)
4029 4014  
4030 4015  #endif  /* __i386 */
4031 4016  
4032 4017  #ifdef DEBUG
4033 4018          .text
4034 4019  .bcmp_panic_msg:
4035 4020          .string "bcmp: arguments below kernelbase"
4036 4021  #endif  /* DEBUG */
4037 4022  
4038 4023  #endif  /* __lint */
4039 4024  
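On amd64 bcmp() simply defers to memcmp() and normalizes the result to 0 or 1; the i386 path hand-rolls a word-at-a-time compare with a byte tail. A hedged C model of the i386 loop, where memcpy() stands in for the word loads the assembly performs directly:

        #include <stddef.h>
        #include <string.h>

        static int
        bcmp_model(const void *s1, const void *s2, size_t len)
        {
                const unsigned char *a = s1;
                const unsigned char *b = s2;

                if (a == b)
                        return (0);
                while (len >= 4) {                      /* .word_loop */
                        unsigned int wa, wb;

                        (void) memcpy(&wa, a, 4);
                        (void) memcpy(&wb, b, 4);
                        if (wa != wb)
                                break;                  /* .word_not_equal */
                        a += 4;
                        b += 4;
                        len -= 4;
                }
                while (len-- > 0) {                     /* .byte_loop */
                        if (*a++ != *b++)
                                return (1);             /* .not_equal */
                }
                return (0);                             /* .equal */
        }
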
4040 4025  #if defined(__lint)
4041 4026  
4042 4027  uint_t
4043 4028  bsrw_insn(uint16_t mask)
4044 4029  {
4045 4030          uint_t index = sizeof (mask) * NBBY - 1;
4046 4031  
4047 4032          while ((mask & (1 << index)) == 0)
4048 4033                  index--;
4049 4034          return (index);
4050 4035  }
4051 4036  
4052 4037  #else   /* __lint */
4053 4038  
4054 4039  #if defined(__amd64)
4055 4040  
4056 4041          ENTRY_NP(bsrw_insn)
4057 4042          xorl    %eax, %eax
4058 4043          bsrw    %di, %ax
4059 4044          ret
4060 4045          SET_SIZE(bsrw_insn)
4061 4046  
4062 4047  #elif defined(__i386)
4063 4048  
4064 4049          ENTRY_NP(bsrw_insn)
4065 4050          movw    4(%esp), %cx
4066 4051          xorl    %eax, %eax
4067 4052          bsrw    %cx, %ax
4068 4053          ret
4069 4054          SET_SIZE(bsrw_insn)
4070 4055  
4071 4056  #endif  /* __i386 */
4072 4057  #endif  /* __lint */
4073 4058  
4074 4059  #if defined(__lint)
4075 4060  
4076 4061  uint_t
4077 4062  atomic_btr32(uint32_t *pending, uint_t pil)
4078 4063  {
4079 4064          return (*pending &= ~(1 << pil));
4080 4065  }
4081 4066  
4082 4067  #else   /* __lint */
4083 4068  
4084 4069  #if defined(__i386)
4085 4070  
4086 4071          ENTRY_NP(atomic_btr32)
4087 4072          movl    4(%esp), %ecx
4088 4073          movl    8(%esp), %edx
4089 4074          xorl    %eax, %eax
4090 4075          lock
4091 4076          btrl    %edx, (%ecx)
4092 4077          setc    %al
4093 4078          ret
4094 4079          SET_SIZE(atomic_btr32)
4095 4080  
4096 4081  #endif  /* __i386 */
4097 4082  #endif  /* __lint */
4098 4083  
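Note that the lint stub above is only a placeholder for the semantics: the assembly atomically clears bit pil with "lock btrl" and returns the bit's previous value via setc. A hedged C equivalent using a compiler atomic builtin:

        static unsigned int
        atomic_btr32_model(volatile unsigned int *pending, unsigned int pil)
        {
                unsigned int old;

                /* atomically clear the bit, keeping the old word */
                old = __atomic_fetch_and(pending, ~(1U << pil),
                    __ATOMIC_SEQ_CST);
                return ((old >> pil) & 1);      /* previous bit value */
        }
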
4099 4084  #if defined(__lint)
4100 4085  
4101 4086  /*ARGSUSED*/
4102 4087  void
4103 4088  switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
4104 4089              uint_t arg2)
4105 4090  {}
4106 4091  
4107 4092  #else   /* __lint */
4108 4093  
4109 4094  #if defined(__amd64)
4110 4095  
4111 4096          ENTRY_NP(switch_sp_and_call)
4112 4097          pushq   %rbp
4113 4098          movq    %rsp, %rbp              /* set up stack frame */
4114 4099          movq    %rdi, %rsp              /* switch stack pointer */
4115 4100          movq    %rdx, %rdi              /* pass func arg 1 */
4116 4101          movq    %rsi, %r11              /* save function to call */
4117 4102          movq    %rcx, %rsi              /* pass func arg 2 */
4118 4103          call    *%r11                   /* call function */
4119 4104          leave                           /* restore stack */
4120 4105          ret
4121 4106          SET_SIZE(switch_sp_and_call)
4122 4107  
4123 4108  #elif defined(__i386)
4124 4109  
4125 4110          ENTRY_NP(switch_sp_and_call)
4126 4111          pushl   %ebp
4127 4112          mov     %esp, %ebp              /* set up stack frame */
4128 4113          movl    8(%ebp), %esp           /* switch stack pointer */
4129 4114          pushl   20(%ebp)                /* push func arg 2 */
4130 4115          pushl   16(%ebp)                /* push func arg 1 */
4131 4116          call    *12(%ebp)               /* call function */
4132 4117          addl    $8, %esp                /* pop arguments */
4133 4118          leave                           /* restore stack */
4134 4119          ret
4135 4120          SET_SIZE(switch_sp_and_call)
4136 4121  
4137 4122  #endif  /* __i386 */
4138 4123  #endif  /* __lint */
4139 4124  
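switch_sp_and_call() runs func(arg1, arg2) with the stack pointer relocated to newsp and restores the original stack through the saved frame pointer on return; interrupt handling uses this to move onto a dedicated stack. A hedged usage sketch, where intr_stack and the handler are illustrative:

        #include <sys/types.h>

        extern void switch_sp_and_call(void *, void (*)(uint_t, uint_t),
            uint_t, uint_t);

        static char intr_stack[8192] __attribute__((aligned(16)));

        static void
        handler(uint_t level, uint_t vec)
        {
                /* runs with the stack pointer inside intr_stack */
        }

        static void
        dispatch(uint_t level, uint_t vec)
        {
                /* stacks grow down, so pass the high end of the buffer */
                switch_sp_and_call(intr_stack + sizeof (intr_stack),
                    handler, level, vec);
        }
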
4140 4125  #if defined(__lint)
4141 4126  
4142 4127  void
4143 4128  kmdb_enter(void)
4144 4129  {}
4145 4130  
4146 4131  #else   /* __lint */
4147 4132  
4148 4133  #if defined(__amd64)
4149 4134  
4150 4135          ENTRY_NP(kmdb_enter)
4151 4136          pushq   %rbp
4152 4137          movq    %rsp, %rbp
4153 4138  
4154 4139          /*
4155 4140           * Save flags, do a 'cli' then return the saved flags
4156 4141           */
4157 4142          call    intr_clear
4158 4143  
4159 4144          int     $T_DBGENTR
4160 4145  
4161 4146          /*
4162 4147           * Restore the saved flags
4163 4148           */
4164 4149          movq    %rax, %rdi
4165 4150          call    intr_restore
4166 4151  
4167 4152          leave
4168 4153          ret     
4169 4154          SET_SIZE(kmdb_enter)
4170 4155  
4171 4156  #elif defined(__i386)
4172 4157  
4173 4158          ENTRY_NP(kmdb_enter)
4174 4159          pushl   %ebp
4175 4160          movl    %esp, %ebp
4176 4161  
4177 4162          /*
4178 4163           * Save flags, do a 'cli' then return the saved flags
4179 4164           */
4180 4165          call    intr_clear
4181 4166  
4182 4167          int     $T_DBGENTR
4183 4168  
4184 4169          /*
4185 4170           * Restore the saved flags
4186 4171           */
4187 4172          pushl   %eax
4188 4173          call    intr_restore
4189 4174          addl    $4, %esp
4190 4175  
4191 4176          leave
4192 4177          ret     
4193 4178          SET_SIZE(kmdb_enter)
4194 4179  
4195 4180  #endif  /* __i386 */
4196 4181  #endif  /* __lint */
4197 4182  
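kmdb_enter() brackets the debugger trap with an interrupt save/disable and restore pair. In C terms, with debugger_trap() as a hypothetical stand-in for the "int $T_DBGENTR" software interrupt and the intr_clear()/intr_restore() routines the assembly actually calls:

        #include <sys/types.h>

        extern ulong_t intr_clear(void);        /* save flags, then cli */
        extern void intr_restore(ulong_t);
        extern void debugger_trap(void);        /* hypothetical stand-in */

        static void
        kmdb_enter_model(void)
        {
                ulong_t flags = intr_clear();

                debugger_trap();                /* int $T_DBGENTR */
                intr_restore(flags);
        }
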
4198 4183  #if defined(__lint)
4199 4184  
4200 4185  void
4201 4186  return_instr(void)
4202 4187  {}
4203 4188  
4204 4189  #else   /* __lint */
4205 4190  
4206 4191          ENTRY_NP(return_instr)
4207 4192          rep;    ret     /* use 2 byte instruction when branch target */
4208 4193                          /* AMD Software Optimization Guide - Section 6.2 */
4209 4194          SET_SIZE(return_instr)
4210 4195  
4211 4196  #endif  /* __lint */
4212 4197  
4213 4198  #if defined(__lint)
4214 4199  
4215 4200  ulong_t
4216 4201  getflags(void)
4217 4202  {
4218 4203          return (0);
4219 4204  }
4220 4205  
4221 4206  #else   /* __lint */
4222 4207  
4223 4208  #if defined(__amd64)
4224 4209  
4225 4210          ENTRY(getflags)
4226 4211          pushfq
4227 4212          popq    %rax
4228 4213  #if defined(__xpv)
4229 4214          CURTHREAD(%rdi)
4230 4215          KPREEMPT_DISABLE(%rdi)
4231 4216          /*
4232 4217           * Synthesize the PS_IE bit from the event mask bit
4233 4218           */
4234 4219          CURVCPU(%r11)
4235 4220          andq    $_BITNOT(PS_IE), %rax
4236 4221          XEN_TEST_UPCALL_MASK(%r11)
4237 4222          jnz     1f
4238 4223          orq     $PS_IE, %rax
4239 4224  1:
4240 4225          KPREEMPT_ENABLE_NOKP(%rdi)
4241 4226  #endif
4242 4227          ret
4243 4228          SET_SIZE(getflags)
4244 4229  
4245 4230  #elif defined(__i386)
4246 4231  
4247 4232          ENTRY(getflags)
4248 4233          pushfl
4249 4234          popl    %eax
4250 4235  #if defined(__xpv)
4251 4236          CURTHREAD(%ecx)
4252 4237          KPREEMPT_DISABLE(%ecx)
4253 4238          /*
4254 4239           * Synthesize the PS_IE bit from the event mask bit
4255 4240           */
4256 4241          CURVCPU(%edx)
4257 4242          andl    $_BITNOT(PS_IE), %eax
4258 4243          XEN_TEST_UPCALL_MASK(%edx)
4259 4244          jnz     1f
4260 4245          orl     $PS_IE, %eax
4261 4246  1:
4262 4247          KPREEMPT_ENABLE_NOKP(%ecx)
4263 4248  #endif
4264 4249          ret
4265 4250          SET_SIZE(getflags)
4266 4251  
4267 4252  #endif  /* __i386 */
4268 4253  
4269 4254  #endif  /* __lint */
4270 4255  
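Under __xpv the hardware IF bit is not meaningful to the guest; the interrupt-enable state lives in the vcpu's event-channel upcall mask, so getflags() rewrites PS_IE from it. A hedged sketch of that rewrite, with parameters standing in for what CURVCPU/XEN_TEST_UPCALL_MASK fetch:

        static unsigned long
        synthesize_ps_ie(unsigned long rfl, int upcall_masked,
            unsigned long ps_ie)
        {
                rfl &= ~ps_ie;          /* andq  $_BITNOT(PS_IE), %rax */
                if (!upcall_masked)     /* XEN_TEST_UPCALL_MASK */
                        rfl |= ps_ie;   /* orq   $PS_IE, %rax */
                return (rfl);
        }
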
4271 4256  #if defined(__lint)
4272 4257  
4273 4258  ftrace_icookie_t
4274 4259  ftrace_interrupt_disable(void)
4275 4260  { return (0); }
4276 4261  
4277 4262  #else   /* __lint */
4278 4263  
4279 4264  #if defined(__amd64)
4280 4265  
4281 4266          ENTRY(ftrace_interrupt_disable)
4282 4267          pushfq
4283 4268          popq    %rax
4284 4269          CLI(%rdx)
4285 4270          ret
4286 4271          SET_SIZE(ftrace_interrupt_disable)
4287 4272  
4288 4273  #elif defined(__i386)
4289 4274                  
4290 4275          ENTRY(ftrace_interrupt_disable)
4291 4276          pushfl
4292 4277          popl    %eax
4293 4278          CLI(%edx)
4294 4279          ret
4295 4280          SET_SIZE(ftrace_interrupt_disable)
4296 4281  
4297 4282  #endif  /* __i386 */    
4298 4283  #endif  /* __lint */
4299 4284  
4300 4285  #if defined(__lint)
4301 4286  
4302 4287  /*ARGSUSED*/
4303 4288  void
4304 4289  ftrace_interrupt_enable(ftrace_icookie_t cookie)
4305 4290  {}
4306 4291  
4307 4292  #else   /* __lint */
4308 4293  
4309 4294  #if defined(__amd64)
4310 4295  
4311 4296          ENTRY(ftrace_interrupt_enable)
4312 4297          pushq   %rdi
4313 4298          popfq
4314 4299          ret
4315 4300          SET_SIZE(ftrace_interrupt_enable)
4316 4301  
4317 4302  #elif defined(__i386)
4318 4303                  
4319 4304          ENTRY(ftrace_interrupt_enable)
4320 4305          movl    4(%esp), %eax
4321 4306          pushl   %eax
4322 4307          popfl
4323 4308          ret
4324 4309          SET_SIZE(ftrace_interrupt_enable)
4325 4310  
4326 4311  #endif  /* __i386 */
4327 4312  #endif  /* __lint */
4328 4313  
4329 4314  #if defined (__lint)
4330 4315  
4331 4316  /*ARGSUSED*/
4332 4317  void
4333 4318  clflush_insn(caddr_t addr)
4334 4319  {}
4335 4320  
4336 4321  #else /* __lint */
4337 4322  
4338 4323  #if defined (__amd64)
4339 4324          ENTRY(clflush_insn)
4340 4325          clflush (%rdi)
4341 4326          ret
4342 4327          SET_SIZE(clflush_insn)
4343 4328  #elif defined (__i386)
4344 4329          ENTRY(clflush_insn)
4345 4330          movl    4(%esp), %eax
4346 4331          clflush (%eax)
4347 4332          ret
4348 4333          SET_SIZE(clflush_insn)
4349 4334  
4350 4335  #endif /* __i386 */
4351 4336  #endif /* __lint */
4352 4337  
4353 4338  #if defined (__lint)
4354 4339  /*ARGSUSED*/
4355 4340  void
4356 4341  mfence_insn(void)
4357 4342  {}
4358 4343  
4359 4344  #else /* __lint */
4360 4345  
4361 4346  #if defined (__amd64)
4362 4347          ENTRY(mfence_insn)
4363 4348          mfence
4364 4349          ret
4365 4350          SET_SIZE(mfence_insn)
4366 4351  #elif defined (__i386)
4367 4352          ENTRY(mfence_insn)
4368 4353          mfence
4369 4354          ret
4370 4355          SET_SIZE(mfence_insn)
4371 4356  
4372 4357  #endif /* __i386 */
4373 4358  #endif /* __lint */
4374 4359  
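For comparison, user-level C reaches the same two instructions through the SSE2 intrinsics; the kernel uses the assembly stubs above instead. A minimal sketch:

        #include <immintrin.h>

        static void
        clflush_model(void *addr)
        {
                _mm_clflush(addr);      /* flush the cache line holding addr */
        }

        static void
        mfence_model(void)
        {
                _mm_mfence();           /* full load/store memory fence */
        }
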
4375 4360  /*
4376 4361   * VMware implements an I/O port that programs can query to detect if software
4377 4362   * is running in a VMware hypervisor. This hypervisor port behaves differently
4378 4363   * depending on magic values in certain registers and modifies some registers
4379 4364   * as a side effect.
4380 4365   *
4381 4366   * References: http://kb.vmware.com/kb/1009458 
4382 4367   */
4383 4368  
4384 4369  #if defined(__lint)
4385 4370  
4386 4371  /* ARGSUSED */
4387 4372  void
4388 4373  vmware_port(int cmd, uint32_t *regs) { return; }
4389 4374  
4390 4375  #else
4391 4376  
4392 4377  #if defined(__amd64)
4393 4378  
4394 4379          ENTRY(vmware_port)
4395 4380          pushq   %rbx
4396 4381          movl    $VMWARE_HVMAGIC, %eax
4397 4382          movl    $0xffffffff, %ebx
4398 4383          movl    %edi, %ecx
4399 4384          movl    $VMWARE_HVPORT, %edx
4400 4385          inl     (%dx)
4401 4386          movl    %eax, (%rsi)
4402 4387          movl    %ebx, 4(%rsi)
4403 4388          movl    %ecx, 8(%rsi)
4404 4389          movl    %edx, 12(%rsi)
4405 4390          popq    %rbx
4406 4391          ret
4407 4392          SET_SIZE(vmware_port)
4408 4393  
4409 4394  #elif defined(__i386)
4410 4395  
4411 4396          ENTRY(vmware_port)
4412 4397          pushl   %ebx
4413 4398          pushl   %esi
4414 4399          movl    $VMWARE_HVMAGIC, %eax
4415 4400          movl    $0xffffffff, %ebx
4416 4401          movl    12(%esp), %ecx
4417 4402          movl    $VMWARE_HVPORT, %edx
4418 4403          inl     (%dx)
4419 4404          movl    16(%esp), %esi
4420 4405          movl    %eax, (%esi)
4421 4406          movl    %ebx, 4(%esi)
4422 4407          movl    %ecx, 8(%esi)
4423 4408          movl    %edx, 12(%esi)
4424 4409          popl    %esi
4425 4410          popl    %ebx
4426 4411          ret
4427 4412          SET_SIZE(vmware_port)
4428 4413  
4429 4414  #endif /* __i386 */
4430 4415  #endif /* __lint */
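
A hedged sketch of how a caller might use vmware_port() to probe for the hypervisor. The GETVERSION command number and the convention that the hypervisor echoes the magic value back in %ebx come from VMware's public backdoor documentation (the KB article above), not from this file, so treat them as assumptions:

        #include <sys/types.h>

        #define VMWARE_BDOOR_MAGIC              0x564d5868      /* "VMXh" */
        #define VMWARE_BDOOR_CMD_GETVERSION     10              /* assumed */

        extern void vmware_port(int, uint32_t *);

        static int
        vmware_present(void)
        {
                uint32_t regs[4];       /* %eax, %ebx, %ecx, %edx */

                vmware_port(VMWARE_BDOOR_CMD_GETVERSION, regs);
                return (regs[1] == VMWARE_BDOOR_MAGIC);
        }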
  