          --- old/usr/src/uts/intel/ia32/ml/i86_subr.s
          +++ new/usr/src/uts/intel/ia32/ml/i86_subr.s
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
  24   24   * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
  25   25   * Copyright (c) 2014 by Delphix. All rights reserved.
  26   26   * Copyright 2016 Joyent, Inc.
  27   27   */
  28   28  
  29   29  /*
  30   30   *  Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
  31   31   *  Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
  32   32   *    All Rights Reserved
  33   33   */
  34   34  
  35   35  /*
  36   36   * Copyright (c) 2009, Intel Corporation.
  37   37   * All rights reserved.
  38   38   */
  39   39  
  40   40  /*
  41   41   * General assembly language routines.
  42   42   * It is the intent of this file to contain routines that are
  43   43   * independent of the specific kernel architecture, and those that are
  44   44   * common across kernel architectures.
  45   45   * As architectures diverge, and implementations of specific
  46   46   * architecture-dependent routines change, the routines should be moved
  47   47   * from this file into the respective ../`arch -k`/subr.s file.
  48   48   */
  49   49  
  50   50  #include <sys/asm_linkage.h>
  51   51  #include <sys/asm_misc.h>
  52   52  #include <sys/panic.h>
  53   53  #include <sys/ontrap.h>
  54   54  #include <sys/regset.h>
  55   55  #include <sys/privregs.h>
  56   56  #include <sys/reboot.h>
  57   57  #include <sys/psw.h>
  58   58  #include <sys/x86_archext.h>
  59   59  
  60   60  #if defined(__lint)
  61   61  #include <sys/types.h>
  62   62  #include <sys/systm.h>
  63   63  #include <sys/thread.h>
  64   64  #include <sys/archsystm.h>
  65   65  #include <sys/byteorder.h>
  66   66  #include <sys/dtrace.h>
  67   67  #include <sys/ftrace.h>
  68   68  #else   /* __lint */
  69   69  #include "assym.h"
  70   70  #endif  /* __lint */
  71   71  #include <sys/dditypes.h>
  72   72  
  73   73  /*
  74   74   * on_fault()
  75   75   *
   76   76   * Catch lofault faults. Like setjmp, except it returns 1 if the
   77   77   * code that follows causes an uncorrectable fault. Turned off
  78   78   * by calling no_fault(). Note that while under on_fault(),
  79   79   * SMAP is disabled. For more information see
  80   80   * uts/intel/ia32/ml/copy.s.
  81   81   */
  82   82  
  83   83  #if defined(__lint)
  84   84  
  85   85  /* ARGSUSED */
  86   86  int
  87   87  on_fault(label_t *ljb)
  88   88  { return (0); }
  89   89  
  90   90  void
  91   91  no_fault(void)
  92   92  {}
  93   93  
  94   94  #else   /* __lint */
  95   95  
  96   96  #if defined(__amd64)
  97   97  
  98   98          ENTRY(on_fault)
  99   99          movq    %gs:CPU_THREAD, %rsi
 100  100          leaq    catch_fault(%rip), %rdx
 101  101          movq    %rdi, T_ONFAULT(%rsi)           /* jumpbuf in t_onfault */
 102  102          movq    %rdx, T_LOFAULT(%rsi)           /* catch_fault in t_lofault */
 103  103          call    smap_disable                    /* allow user accesses */
 104  104          jmp     setjmp                          /* let setjmp do the rest */
 105  105  
 106  106  catch_fault:
 107  107          movq    %gs:CPU_THREAD, %rsi
 108  108          movq    T_ONFAULT(%rsi), %rdi           /* address of save area */
 109  109          xorl    %eax, %eax
 110  110          movq    %rax, T_ONFAULT(%rsi)           /* turn off onfault */
 111  111          movq    %rax, T_LOFAULT(%rsi)           /* turn off lofault */
 112  112          call    smap_enable                     /* disallow user accesses */
 113  113          jmp     longjmp                         /* let longjmp do the rest */
 114  114          SET_SIZE(on_fault)
 115  115  
 116  116          ENTRY(no_fault)
 117  117          movq    %gs:CPU_THREAD, %rsi
 118  118          xorl    %eax, %eax
 119  119          movq    %rax, T_ONFAULT(%rsi)           /* turn off onfault */
 120  120          movq    %rax, T_LOFAULT(%rsi)           /* turn off lofault */
 121  121          call    smap_enable                     /* disallow user accesses */
 122  122          ret
 123  123          SET_SIZE(no_fault)
 124  124  
 125  125  #elif defined(__i386)
 126  126  
 127  127          ENTRY(on_fault)
 128  128          movl    %gs:CPU_THREAD, %edx
 129  129          movl    4(%esp), %eax                   /* jumpbuf address */
 130  130          leal    catch_fault, %ecx
 131  131          movl    %eax, T_ONFAULT(%edx)           /* jumpbuf in t_onfault */
 132  132          movl    %ecx, T_LOFAULT(%edx)           /* catch_fault in t_lofault */
 133  133          jmp     setjmp                          /* let setjmp do the rest */
 134  134  
 135  135  catch_fault:
 136  136          movl    %gs:CPU_THREAD, %edx
 137  137          xorl    %eax, %eax
 138  138          movl    T_ONFAULT(%edx), %ecx           /* address of save area */
 139  139          movl    %eax, T_ONFAULT(%edx)           /* turn off onfault */
 140  140          movl    %eax, T_LOFAULT(%edx)           /* turn off lofault */
 141  141          pushl   %ecx
 142  142          call    longjmp                         /* let longjmp do the rest */
 143  143          SET_SIZE(on_fault)
 144  144  
 145  145          ENTRY(no_fault)
 146  146          movl    %gs:CPU_THREAD, %edx
 147  147          xorl    %eax, %eax
 148  148          movl    %eax, T_ONFAULT(%edx)           /* turn off onfault */
 149  149          movl    %eax, T_LOFAULT(%edx)           /* turn off lofault */
 150  150          ret
 151  151          SET_SIZE(no_fault)
 152  152  
 153  153  #endif  /* __i386 */
 154  154  #endif  /* __lint */
 155  155  
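A minimal usage sketch of the on_fault()/no_fault() protocol above, from kernel
C code; the guarded access and the EFAULT return are illustrative:

	label_t ljb;

	if (on_fault(&ljb)) {
		/* on_fault() returns 1 here if the guarded code faults */
		no_fault();
		return (EFAULT);
	}
	/* ... code that may take a recoverable lofault ... */
	no_fault();			/* disarm the handler on success */
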
 156  156  /*
 157  157   * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
 158  158   * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
 159  159   */
 160  160  
 161  161  #if defined(lint)
 162  162  
 163  163  void
 164  164  on_trap_trampoline(void)
 165  165  {}
 166  166  
 167  167  #else   /* __lint */
 168  168  
 169  169  #if defined(__amd64)
 170  170  
 171  171          ENTRY(on_trap_trampoline)
 172  172          movq    %gs:CPU_THREAD, %rsi
 173  173          movq    T_ONTRAP(%rsi), %rdi
 174  174          addq    $OT_JMPBUF, %rdi
 175  175          jmp     longjmp
 176  176          SET_SIZE(on_trap_trampoline)
 177  177  
 178  178  #elif defined(__i386)
 179  179  
 180  180          ENTRY(on_trap_trampoline)
 181  181          movl    %gs:CPU_THREAD, %eax
 182  182          movl    T_ONTRAP(%eax), %eax
 183  183          addl    $OT_JMPBUF, %eax
 184  184          pushl   %eax
 185  185          call    longjmp
 186  186          SET_SIZE(on_trap_trampoline)
 187  187  
 188  188  #endif  /* __i386 */
 189  189  #endif  /* __lint */
 190  190  
 191  191  /*
 192  192   * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
 193  193   * more information about the on_trap() mechanism.  If the on_trap_data is the
 194  194   * same as the topmost stack element, we just modify that element.
 195  195   */
 196  196  #if defined(lint)
 197  197  
 198  198  /*ARGSUSED*/
 199  199  int
 200  200  on_trap(on_trap_data_t *otp, uint_t prot)
 201  201  { return (0); }
 202  202  
 203  203  #else   /* __lint */
 204  204  
 205  205  #if defined(__amd64)
 206  206  
 207  207          ENTRY(on_trap)
 208  208          movw    %si, OT_PROT(%rdi)              /* ot_prot = prot */
 209  209          movw    $0, OT_TRAP(%rdi)               /* ot_trap = 0 */
 210  210          leaq    on_trap_trampoline(%rip), %rdx  /* rdx = &on_trap_trampoline */
 211  211          movq    %rdx, OT_TRAMPOLINE(%rdi)       /* ot_trampoline = rdx */
 212  212          xorl    %ecx, %ecx
 213  213          movq    %rcx, OT_HANDLE(%rdi)           /* ot_handle = NULL */
 214  214          movq    %rcx, OT_PAD1(%rdi)             /* ot_pad1 = NULL */
 215  215          movq    %gs:CPU_THREAD, %rdx            /* rdx = curthread */
 216  216          movq    T_ONTRAP(%rdx), %rcx            /* rcx = curthread->t_ontrap */
 217  217          cmpq    %rdi, %rcx                      /* if (otp == %rcx)     */
 218  218          je      0f                              /*      don't modify t_ontrap */
 219  219  
 220  220          movq    %rcx, OT_PREV(%rdi)             /* ot_prev = t_ontrap */
 221  221          movq    %rdi, T_ONTRAP(%rdx)            /* curthread->t_ontrap = otp */
 222  222  
 223  223  0:      addq    $OT_JMPBUF, %rdi                /* &ot_jmpbuf */
 224  224          jmp     setjmp
 225  225          SET_SIZE(on_trap)
 226  226  
 227  227  #elif defined(__i386)
 228  228  
 229  229          ENTRY(on_trap)
 230  230          movl    4(%esp), %eax                   /* %eax = otp */
 231  231          movl    8(%esp), %edx                   /* %edx = prot */
 232  232  
 233  233          movw    %dx, OT_PROT(%eax)              /* ot_prot = prot */
 234  234          movw    $0, OT_TRAP(%eax)               /* ot_trap = 0 */
 235  235          leal    on_trap_trampoline, %edx        /* %edx = &on_trap_trampoline */
 236  236          movl    %edx, OT_TRAMPOLINE(%eax)       /* ot_trampoline = %edx */
 237  237          movl    $0, OT_HANDLE(%eax)             /* ot_handle = NULL */
 238  238          movl    $0, OT_PAD1(%eax)               /* ot_pad1 = NULL */
 239  239          movl    %gs:CPU_THREAD, %edx            /* %edx = curthread */
 240  240          movl    T_ONTRAP(%edx), %ecx            /* %ecx = curthread->t_ontrap */
 241  241          cmpl    %eax, %ecx                      /* if (otp == %ecx) */
 242  242          je      0f                              /*    don't modify t_ontrap */
 243  243  
 244  244          movl    %ecx, OT_PREV(%eax)             /* ot_prev = t_ontrap */
 245  245          movl    %eax, T_ONTRAP(%edx)            /* curthread->t_ontrap = otp */
 246  246  
 247  247  0:      addl    $OT_JMPBUF, %eax                /* %eax = &ot_jmpbuf */
 248  248          movl    %eax, 4(%esp)                   /* put %eax back on the stack */
 249  249          jmp     setjmp                          /* let setjmp do the rest */
 250  250          SET_SIZE(on_trap)
 251  251  
 252  252  #endif  /* __i386 */
 253  253  #endif  /* __lint */
 254  254  
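For comparison, a sketch of the on_trap() protocol with the OT_DATA_ACCESS
protection from <sys/ontrap.h>; the guarded access is illustrative:

	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) != 0) {
		/* the trampoline longjmp'ed back; setjmp returned nonzero */
		no_trap();
		return (EFAULT);
	}
	/* ... access that may trap ... */
	no_trap();			/* pop the t_ontrap element */
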
 255  255  /*
 256  256   * Setjmp and longjmp implement non-local gotos using state vectors
  257  257   * of type label_t.
 258  258   */
 259  259  
 260  260  #if defined(__lint)
 261  261  
 262  262  /* ARGSUSED */
 263  263  int
 264  264  setjmp(label_t *lp)
 265  265  { return (0); }
 266  266  
 267  267  /* ARGSUSED */
 268  268  void
 269  269  longjmp(label_t *lp)
 270  270  {}
 271  271  
 272  272  #else   /* __lint */
 273  273  
 274  274  #if LABEL_PC != 0
 275  275  #error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
 276  276  #endif  /* LABEL_PC != 0 */
 277  277  
 278  278  #if defined(__amd64)
 279  279  
 280  280          ENTRY(setjmp)
 281  281          movq    %rsp, LABEL_SP(%rdi)
 282  282          movq    %rbp, LABEL_RBP(%rdi)
 283  283          movq    %rbx, LABEL_RBX(%rdi)
 284  284          movq    %r12, LABEL_R12(%rdi)
 285  285          movq    %r13, LABEL_R13(%rdi)
 286  286          movq    %r14, LABEL_R14(%rdi)
 287  287          movq    %r15, LABEL_R15(%rdi)
 288  288          movq    (%rsp), %rdx            /* return address */
 289  289          movq    %rdx, (%rdi)            /* LABEL_PC is 0 */
 290  290          xorl    %eax, %eax              /* return 0 */
 291  291          ret
 292  292          SET_SIZE(setjmp)
 293  293  
 294  294          ENTRY(longjmp)
 295  295          movq    LABEL_SP(%rdi), %rsp
 296  296          movq    LABEL_RBP(%rdi), %rbp
 297  297          movq    LABEL_RBX(%rdi), %rbx
 298  298          movq    LABEL_R12(%rdi), %r12
 299  299          movq    LABEL_R13(%rdi), %r13
 300  300          movq    LABEL_R14(%rdi), %r14
 301  301          movq    LABEL_R15(%rdi), %r15
 302  302          movq    (%rdi), %rdx            /* return address; LABEL_PC is 0 */
 303  303          movq    %rdx, (%rsp)
 304  304          xorl    %eax, %eax
 305  305          incl    %eax                    /* return 1 */
 306  306          ret
 307  307          SET_SIZE(longjmp)
 308  308  
 309  309  #elif defined(__i386)
 310  310  
 311  311          ENTRY(setjmp)
 312  312          movl    4(%esp), %edx           /* address of save area */
 313  313          movl    %ebp, LABEL_EBP(%edx)
 314  314          movl    %ebx, LABEL_EBX(%edx)
 315  315          movl    %esi, LABEL_ESI(%edx)
 316  316          movl    %edi, LABEL_EDI(%edx)
 317  317          movl    %esp, 4(%edx)
 318  318          movl    (%esp), %ecx            /* %eip (return address) */
 319  319          movl    %ecx, (%edx)            /* LABEL_PC is 0 */
 320  320          subl    %eax, %eax              /* return 0 */
 321  321          ret
 322  322          SET_SIZE(setjmp)
 323  323  
 324  324          ENTRY(longjmp)
 325  325          movl    4(%esp), %edx           /* address of save area */
 326  326          movl    LABEL_EBP(%edx), %ebp
 327  327          movl    LABEL_EBX(%edx), %ebx
 328  328          movl    LABEL_ESI(%edx), %esi
 329  329          movl    LABEL_EDI(%edx), %edi
 330  330          movl    4(%edx), %esp
 331  331          movl    (%edx), %ecx            /* %eip (return addr); LABEL_PC is 0 */
 332  332          movl    $1, %eax
 333  333          addl    $4, %esp                /* pop ret adr */
 334  334          jmp     *%ecx                   /* indirect */
 335  335          SET_SIZE(longjmp)
 336  336  
 337  337  #endif  /* __i386 */
 338  338  #endif  /* __lint */
 339  339  
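The control transfer the two routines implement, sketched in C; the failing
operation is hypothetical:

	label_t jb;

	if (setjmp(&jb) == 0) {
		/* first return: pc, stack and callee-saved regs are in jb */
		risky_operation();	/* hypothetical */
		longjmp(&jb);		/* unwinds; setjmp now "returns" 1 */
	} else {
		/* resumed here, with the state saved by setjmp restored */
	}
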
 340  340  /*
  341  341   * If a() calls b(), and b() calls caller(), then
  342  342   * caller() returns the return address in a().
 343  343   * (Note: We assume a() and b() are C routines which do the normal entry/exit
 344  344   *  sequence.)
 345  345   */
 346  346  
 347  347  #if defined(__lint)
 348  348  
 349  349  caddr_t
 350  350  caller(void)
 351  351  { return (0); }
 352  352  
 353  353  #else   /* __lint */
 354  354  
 355  355  #if defined(__amd64)
 356  356  
 357  357          ENTRY(caller)
 358  358          movq    8(%rbp), %rax           /* b()'s return pc, in a() */
 359  359          ret
 360  360          SET_SIZE(caller)
 361  361  
 362  362  #elif defined(__i386)
 363  363  
 364  364          ENTRY(caller)
 365  365          movl    4(%ebp), %eax           /* b()'s return pc, in a() */
 366  366          ret
 367  367          SET_SIZE(caller)
 368  368  
 369  369  #endif  /* __i386 */
 370  370  #endif  /* __lint */
 371  371  
 372  372  /*
  373  373   * If a() calls callee(), callee() returns the
  374  374   * return address in a().
 375  375   */
 376  376  
 377  377  #if defined(__lint)
 378  378  
 379  379  caddr_t
 380  380  callee(void)
 381  381  { return (0); }
 382  382  
 383  383  #else   /* __lint */
 384  384  
 385  385  #if defined(__amd64)
 386  386  
 387  387          ENTRY(callee)
 388  388          movq    (%rsp), %rax            /* callee()'s return pc, in a() */
 389  389          ret
 390  390          SET_SIZE(callee)
 391  391  
 392  392  #elif defined(__i386)
 393  393  
 394  394          ENTRY(callee)
 395  395          movl    (%esp), %eax            /* callee()'s return pc, in a() */
 396  396          ret
 397  397          SET_SIZE(callee)
 398  398  
 399  399  #endif  /* __i386 */
 400  400  #endif  /* __lint */
 401  401  
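What the two routines return, sketched with hypothetical functions a() and b():

	void
	b(void)
	{
		caddr_t pc = caller();	/* b()'s return address: a pc in a() */
	}

	void
	a(void)
	{
		caddr_t pc = callee();	/* the pc in a() right after this call */
		b();
	}
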
 402  402  /*
 403  403   * return the current frame pointer
 404  404   */
 405  405  
 406  406  #if defined(__lint)
 407  407  
 408  408  greg_t
 409  409  getfp(void)
 410  410  { return (0); }
 411  411  
 412  412  #else   /* __lint */
 413  413  
 414  414  #if defined(__amd64)
 415  415  
 416  416          ENTRY(getfp)
 417  417          movq    %rbp, %rax
 418  418          ret
 419  419          SET_SIZE(getfp)
 420  420  
 421  421  #elif defined(__i386)
 422  422  
 423  423          ENTRY(getfp)
 424  424          movl    %ebp, %eax
 425  425          ret
 426  426          SET_SIZE(getfp)
 427  427  
 428  428  #endif  /* __i386 */
 429  429  #endif  /* __lint */
 430  430  
 431  431  /*
 432  432   * Invalidate a single page table entry in the TLB
 433  433   */
 434  434  
 435  435  #if defined(__lint)
 436  436  
 437  437  /* ARGSUSED */
 438  438  void
 439  439  mmu_tlbflush_entry(caddr_t m)
 440  440  {}
 441  441  
 442  442  #else   /* __lint */
 443  443  
 444  444  #if defined(__amd64)
 445  445  
 446  446          ENTRY(mmu_tlbflush_entry)
 447  447          invlpg  (%rdi)
 448  448          ret
 449  449          SET_SIZE(mmu_tlbflush_entry)
 450  450  
 451  451  #elif defined(__i386)
 452  452  
 453  453          ENTRY(mmu_tlbflush_entry)
 454  454          movl    4(%esp), %eax
 455  455          invlpg  (%eax)
 456  456          ret
 457  457          SET_SIZE(mmu_tlbflush_entry)
 458  458  
 459  459  #endif  /* __i386 */
 460  460  #endif  /* __lint */
 461  461  
 462  462  
 463  463  /*
 464  464   * Get/Set the value of various control registers
 465  465   */
 466  466  
 467  467  #if defined(__lint)
 468  468  
 469  469  ulong_t
 470  470  getcr0(void)
 471  471  { return (0); }
 472  472  
 473  473  /* ARGSUSED */
 474  474  void
 475  475  setcr0(ulong_t value)
 476  476  {}
 477  477  
 478  478  ulong_t
 479  479  getcr2(void)
 480  480  { return (0); }
 481  481  
 482  482  ulong_t
 483  483  getcr3(void)
 484  484  { return (0); }
 485  485  
 486  486  #if !defined(__xpv)
 487  487  /* ARGSUSED */
 488  488  void
 489  489  setcr3(ulong_t val)
 490  490  {}
 491  491  
 492  492  void
 493  493  reload_cr3(void)
 494  494  {}
 495  495  #endif
 496  496  
 497  497  ulong_t
 498  498  getcr4(void)
 499  499  { return (0); }
 500  500  
 501  501  /* ARGSUSED */
 502  502  void
 503  503  setcr4(ulong_t val)
 504  504  {}
 505  505  
 506  506  #if defined(__amd64)
 507  507  
 508  508  ulong_t
 509  509  getcr8(void)
 510  510  { return (0); }
 511  511  
 512  512  /* ARGSUSED */
 513  513  void
 514  514  setcr8(ulong_t val)
 515  515  {}
 516  516  
 517  517  #endif  /* __amd64 */
 518  518  
 519  519  #else   /* __lint */
 520  520  
 521  521  #if defined(__amd64)
 522  522  
 523  523          ENTRY(getcr0)
 524  524          movq    %cr0, %rax
 525  525          ret
 526  526          SET_SIZE(getcr0)
 527  527  
 528  528          ENTRY(setcr0)
 529  529          movq    %rdi, %cr0
 530  530          ret
 531  531          SET_SIZE(setcr0)
 532  532  
 533  533          ENTRY(getcr2)
 534  534  #if defined(__xpv)
 535  535          movq    %gs:CPU_VCPU_INFO, %rax
 536  536          movq    VCPU_INFO_ARCH_CR2(%rax), %rax
 537  537  #else
 538  538          movq    %cr2, %rax
 539  539  #endif
 540  540          ret
 541  541          SET_SIZE(getcr2)
 542  542  
 543  543          ENTRY(getcr3)
 544  544          movq    %cr3, %rax
 545  545          ret
 546  546          SET_SIZE(getcr3)
 547  547  
 548  548  #if !defined(__xpv)
 549  549  
 550  550          ENTRY(setcr3)
 551  551          movq    %rdi, %cr3
 552  552          ret
 553  553          SET_SIZE(setcr3)
 554  554  
 555  555          ENTRY(reload_cr3)
 556  556          movq    %cr3, %rdi
 557  557          movq    %rdi, %cr3
 558  558          ret
 559  559          SET_SIZE(reload_cr3)
 560  560  
 561  561  #endif  /* __xpv */
 562  562  
 563  563          ENTRY(getcr4)
 564  564          movq    %cr4, %rax
 565  565          ret
 566  566          SET_SIZE(getcr4)
 567  567  
 568  568          ENTRY(setcr4)
 569  569          movq    %rdi, %cr4
 570  570          ret
 571  571          SET_SIZE(setcr4)
 572  572  
 573  573          ENTRY(getcr8)
 574  574          movq    %cr8, %rax
 575  575          ret
 576  576          SET_SIZE(getcr8)
 577  577  
 578  578          ENTRY(setcr8)
 579  579          movq    %rdi, %cr8
 580  580          ret
 581  581          SET_SIZE(setcr8)
 582  582  
 583  583  #elif defined(__i386)
 584  584  
 585  585          ENTRY(getcr0)
 586  586          movl    %cr0, %eax
 587  587          ret
 588  588          SET_SIZE(getcr0)
 589  589  
 590  590          ENTRY(setcr0)
 591  591          movl    4(%esp), %eax
 592  592          movl    %eax, %cr0
 593  593          ret
 594  594          SET_SIZE(setcr0)
 595  595  
 596  596          /*
 597  597           * "lock mov %cr0" is used on processors which indicate it is
 598  598           * supported via CPUID. Normally the 32 bit TPR is accessed via
 599  599           * the local APIC.
 600  600           */
 601  601          ENTRY(getcr8)
 602  602          lock
 603  603          movl    %cr0, %eax
 604  604          ret
 605  605          SET_SIZE(getcr8)
 606  606  
 607  607          ENTRY(setcr8)
 608  608          movl    4(%esp), %eax
 609  609          lock
 610  610          movl    %eax, %cr0
 611  611          ret
 612  612          SET_SIZE(setcr8)
 613  613  
 614  614          ENTRY(getcr2)
 615  615  #if defined(__xpv)
 616  616          movl    %gs:CPU_VCPU_INFO, %eax
 617  617          movl    VCPU_INFO_ARCH_CR2(%eax), %eax
 618  618  #else
 619  619          movl    %cr2, %eax
 620  620  #endif
 621  621          ret
 622  622          SET_SIZE(getcr2)
 623  623  
 624  624          ENTRY(getcr3)
 625  625          movl    %cr3, %eax
 626  626          ret
 627  627          SET_SIZE(getcr3)
 628  628  
 629  629  #if !defined(__xpv)
 630  630  
 631  631          ENTRY(setcr3)
 632  632          movl    4(%esp), %eax
 633  633          movl    %eax, %cr3
 634  634          ret
 635  635          SET_SIZE(setcr3)
 636  636  
 637  637          ENTRY(reload_cr3)
 638  638          movl    %cr3, %eax
 639  639          movl    %eax, %cr3
 640  640          ret
 641  641          SET_SIZE(reload_cr3)
 642  642  
 643  643  #endif  /* __xpv */
 644  644  
 645  645          ENTRY(getcr4)
 646  646          movl    %cr4, %eax
 647  647          ret
 648  648          SET_SIZE(getcr4)
 649  649  
 650  650          ENTRY(setcr4)
 651  651          movl    4(%esp), %eax
 652  652          movl    %eax, %cr4
 653  653          ret
 654  654          SET_SIZE(setcr4)
 655  655  
 656  656  #endif  /* __i386 */
 657  657  #endif  /* __lint */
 658  658  
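A hedged example of the accessor pattern; CR4_SMEP is a flag from
<sys/controlregs.h> and is used here purely as an illustration:

	ulong_t cr4 = getcr4();

	if ((cr4 & CR4_SMEP) == 0)	/* illustrative feature bit */
		setcr4(cr4 | CR4_SMEP);

	reload_cr3();		/* rewrite %cr3 to flush non-global TLB entries */
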
 659  659  #if defined(__lint)
 660  660  
 661  661  /*ARGSUSED*/
 662  662  uint32_t
 663  663  __cpuid_insn(struct cpuid_regs *regs)
 664  664  { return (0); }
 665  665  
 666  666  #else   /* __lint */
 667  667  
 668  668  #if defined(__amd64)
 669  669  
 670  670          ENTRY(__cpuid_insn)
 671  671          movq    %rbx, %r8
 672  672          movq    %rcx, %r9
 673  673          movq    %rdx, %r11
 674  674          movl    (%rdi), %eax            /* %eax = regs->cp_eax */
 675  675          movl    0x4(%rdi), %ebx         /* %ebx = regs->cp_ebx */
 676  676          movl    0x8(%rdi), %ecx         /* %ecx = regs->cp_ecx */
 677  677          movl    0xc(%rdi), %edx         /* %edx = regs->cp_edx */
 678  678          cpuid
 679  679          movl    %eax, (%rdi)            /* regs->cp_eax = %eax */
 680  680          movl    %ebx, 0x4(%rdi)         /* regs->cp_ebx = %ebx */
 681  681          movl    %ecx, 0x8(%rdi)         /* regs->cp_ecx = %ecx */
 682  682          movl    %edx, 0xc(%rdi)         /* regs->cp_edx = %edx */
 683  683          movq    %r8, %rbx
 684  684          movq    %r9, %rcx
 685  685          movq    %r11, %rdx
 686  686          ret
 687  687          SET_SIZE(__cpuid_insn)
 688  688  
 689  689  #elif defined(__i386)
 690  690  
 691  691          ENTRY(__cpuid_insn)
 692  692          pushl   %ebp
 693  693          movl    0x8(%esp), %ebp         /* %ebp = regs */
 694  694          pushl   %ebx
 695  695          pushl   %ecx
 696  696          pushl   %edx
 697  697          movl    (%ebp), %eax            /* %eax = regs->cp_eax */
 698  698          movl    0x4(%ebp), %ebx         /* %ebx = regs->cp_ebx */
 699  699          movl    0x8(%ebp), %ecx         /* %ecx = regs->cp_ecx */
 700  700          movl    0xc(%ebp), %edx         /* %edx = regs->cp_edx */
 701  701          cpuid
 702  702          movl    %eax, (%ebp)            /* regs->cp_eax = %eax */
 703  703          movl    %ebx, 0x4(%ebp)         /* regs->cp_ebx = %ebx */
 704  704          movl    %ecx, 0x8(%ebp)         /* regs->cp_ecx = %ecx */
 705  705          movl    %edx, 0xc(%ebp)         /* regs->cp_edx = %edx */
 706  706          popl    %edx
 707  707          popl    %ecx
 708  708          popl    %ebx
 709  709          popl    %ebp
 710  710          ret
 711  711          SET_SIZE(__cpuid_insn)
 712  712  
 713  713  #endif  /* __i386 */
 714  714  #endif  /* __lint */
 715  715  
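A usage sketch; struct cpuid_regs and its cp_* fields come from
<sys/x86_archext.h>, and leaf 0 is illustrative:

	struct cpuid_regs regs;

	regs.cp_eax = 0;		/* leaf 0: vendor identification */
	(void) __cpuid_insn(&regs);
	/* the 12-byte vendor string is now in cp_ebx, cp_edx, cp_ecx */
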
 716  716  #if defined(__lint)
 717  717  
 718  718  /*ARGSUSED*/
 719  719  void
 720  720  i86_monitor(volatile uint32_t *addr, uint32_t extensions, uint32_t hints)
 721  721  {}
 722  722  
 723  723  #else   /* __lint */
 724  724  
 725  725  #if defined(__amd64)
 726  726  
 727  727          ENTRY_NP(i86_monitor)
 728  728          pushq   %rbp
 729  729          movq    %rsp, %rbp
 730  730          movq    %rdi, %rax              /* addr */
 731  731          movq    %rsi, %rcx              /* extensions */
 732  732          /* rdx contains input arg3: hints */
 733  733          clflush (%rax)
 734  734          .byte   0x0f, 0x01, 0xc8        /* monitor */
 735  735          leave
 736  736          ret
 737  737          SET_SIZE(i86_monitor)
 738  738  
 739  739  #elif defined(__i386)
 740  740  
 741  741  ENTRY_NP(i86_monitor)
 742  742          pushl   %ebp
 743  743          movl    %esp, %ebp
 744  744          movl    0x8(%ebp),%eax          /* addr */
 745  745          movl    0xc(%ebp),%ecx          /* extensions */
 746  746          movl    0x10(%ebp),%edx         /* hints */
 747  747          clflush (%eax)
 748  748          .byte   0x0f, 0x01, 0xc8        /* monitor */
 749  749          leave
 750  750          ret
 751  751          SET_SIZE(i86_monitor)
 752  752  
 753  753  #endif  /* __i386 */
 754  754  #endif  /* __lint */
 755  755  
 756  756  #if defined(__lint)
 757  757  
 758  758  /*ARGSUSED*/
 759  759  void
 760  760  i86_mwait(uint32_t data, uint32_t extensions)
 761  761  {}
 762  762  
 763  763  #else   /* __lint */
 764  764  
 765  765  #if defined(__amd64)
 766  766  
 767  767          ENTRY_NP(i86_mwait)
 768  768          pushq   %rbp
 769  769          movq    %rsp, %rbp
 770  770          movq    %rdi, %rax              /* data */
 771  771          movq    %rsi, %rcx              /* extensions */
 772  772          .byte   0x0f, 0x01, 0xc9        /* mwait */
 773  773          leave
 774  774          ret
 775  775          SET_SIZE(i86_mwait)
 776  776  
 777  777  #elif defined(__i386)
 778  778  
 779  779          ENTRY_NP(i86_mwait)
 780  780          pushl   %ebp
 781  781          movl    %esp, %ebp
 782  782          movl    0x8(%ebp),%eax          /* data */
 783  783          movl    0xc(%ebp),%ecx          /* extensions */
 784  784          .byte   0x0f, 0x01, 0xc9        /* mwait */
 785  785          leave
 786  786          ret
 787  787          SET_SIZE(i86_mwait)
 788  788  
 789  789  #endif  /* __i386 */
 790  790  #endif  /* __lint */
 791  791  
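A minimal idle-loop sketch of the pair; the wakeup flag and the re-check after
arming are illustrative (a real caller sizes the monitored region from the
CPUID monitor leaf):

	volatile uint32_t mwait_flag = 0;	/* hypothetical wakeup flag */

	i86_monitor(&mwait_flag, 0, 0);	/* arm monitoring of the line */
	if (mwait_flag == 0)		/* re-check to close the race */
		i86_mwait(0, 0);	/* sleep until the line is written */
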
 792  792  #if defined(__xpv)
 793  793          /*
 794  794           * Defined in C
 795  795           */
 796  796  #else
 797  797  
 798  798  #if defined(__lint)
 799  799  
 800  800  hrtime_t
 801  801  tsc_read(void)
 802  802  {
 803  803          return (0);
 804  804  }
 805  805  
 806  806  #else   /* __lint */
 807  807  
 808  808  #if defined(__amd64)
 809  809  
 810  810          ENTRY_NP(tsc_read)
 811  811          movq    %rbx, %r11
 812  812          movl    $0, %eax
 813  813          cpuid
 814  814          rdtsc
 815  815          movq    %r11, %rbx
 816  816          shlq    $32, %rdx
 817  817          orq     %rdx, %rax
 818  818          ret
 819  819          .globl _tsc_mfence_start
 820  820  _tsc_mfence_start:
 821  821          mfence
 822  822          rdtsc
 823  823          shlq    $32, %rdx
 824  824          orq     %rdx, %rax
 825  825          ret
 826  826          .globl _tsc_mfence_end
 827  827  _tsc_mfence_end:
 828  828          .globl _tscp_start
 829  829  _tscp_start:
 830  830          .byte   0x0f, 0x01, 0xf9        /* rdtscp instruction */
 831  831          shlq    $32, %rdx
 832  832          orq     %rdx, %rax
 833  833          ret
 834  834          .globl _tscp_end
 835  835  _tscp_end:
 836  836          .globl _no_rdtsc_start
 837  837  _no_rdtsc_start:
 838  838          xorl    %edx, %edx
 839  839          xorl    %eax, %eax
 840  840          ret
 841  841          .globl _no_rdtsc_end
 842  842  _no_rdtsc_end:
 843  843          .globl _tsc_lfence_start
 844  844  _tsc_lfence_start:
 845  845          lfence
 846  846          rdtsc
 847  847          shlq    $32, %rdx
 848  848          orq     %rdx, %rax
 849  849          ret
 850  850          .globl _tsc_lfence_end
 851  851  _tsc_lfence_end:
 852  852          SET_SIZE(tsc_read)
 853  853  
 854  854  #else /* __i386 */
 855  855  
 856  856          ENTRY_NP(tsc_read)
 857  857          pushl   %ebx
 858  858          movl    $0, %eax
 859  859          cpuid
 860  860          rdtsc
 861  861          popl    %ebx
 862  862          ret
 863  863          .globl _tsc_mfence_start
 864  864  _tsc_mfence_start:
 865  865          mfence
 866  866          rdtsc
 867  867          ret
 868  868          .globl _tsc_mfence_end
 869  869  _tsc_mfence_end:
 870  870          .globl  _tscp_start
 871  871  _tscp_start:
 872  872          .byte   0x0f, 0x01, 0xf9        /* rdtscp instruction */
 873  873          ret
 874  874          .globl _tscp_end
 875  875  _tscp_end:
 876  876          .globl _no_rdtsc_start
 877  877  _no_rdtsc_start:
 878  878          xorl    %edx, %edx
 879  879          xorl    %eax, %eax
 880  880          ret
 881  881          .globl _no_rdtsc_end
 882  882  _no_rdtsc_end:
 883  883          .globl _tsc_lfence_start
 884  884  _tsc_lfence_start:
 885  885          lfence
 886  886          rdtsc
 887  887          ret
 888  888          .globl _tsc_lfence_end
 889  889  _tsc_lfence_end:
 890  890          SET_SIZE(tsc_read)
 891  891  
 892  892  #endif  /* __i386 */
 893  893  
 894  894  #endif  /* __lint */
 895  895  
 896  896  
 897  897  #endif  /* __xpv */
 898  898  
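Each 64-bit variant above ends the same way: rdtsc leaves the counter split
across %edx:%eax, and the shlq/orq tail combines the halves. The C equivalent,
with a hypothetical helper name:

	hrtime_t
	tsc_combine(uint32_t lo, uint32_t hi)
	{
		return (((hrtime_t)hi << 32) | lo);
	}
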
 899  899  #ifdef __lint
 900  900  /*
  901  901   * Do not use this function to obtain the clock tick.  It is
  902  902   * called by callers who do not need a guaranteed-correct
  903  903   * tick value.  The proper routine to use is tsc_read().
 904  904   */
 905  905  u_longlong_t
 906  906  randtick(void)
 907  907  {
 908  908          return (0);
 909  909  }
 910  910  #else
 911  911  #if defined(__amd64)
 912  912          ENTRY_NP(randtick)
 913  913          rdtsc
 914  914          shlq    $32, %rdx
 915  915          orq     %rdx, %rax
 916  916          ret
 917  917          SET_SIZE(randtick)
 918  918  #else
 919  919          ENTRY_NP(randtick)
 920  920          rdtsc
 921  921          ret
 922  922          SET_SIZE(randtick)
 923  923  #endif /* __i386 */
 924  924  #endif /* __lint */
 925  925  /*
 926  926   * Insert entryp after predp in a doubly linked list.
 927  927   */
 928  928  
 929  929  #if defined(__lint)
 930  930  
 931  931  /*ARGSUSED*/
 932  932  void
 933  933  _insque(caddr_t entryp, caddr_t predp)
 934  934  {}
 935  935  
 936  936  #else   /* __lint */
 937  937  
 938  938  #if defined(__amd64)
 939  939  
 940  940          ENTRY(_insque)
 941  941          movq    (%rsi), %rax            /* predp->forw                  */
 942  942          movq    %rsi, CPTRSIZE(%rdi)    /* entryp->back = predp         */
 943  943          movq    %rax, (%rdi)            /* entryp->forw = predp->forw   */
 944  944          movq    %rdi, (%rsi)            /* predp->forw = entryp         */
 945  945          movq    %rdi, CPTRSIZE(%rax)    /* predp->forw->back = entryp   */
 946  946          ret
 947  947          SET_SIZE(_insque)
 948  948  
 949  949  #elif defined(__i386)
 950  950  
 951  951          ENTRY(_insque)
 952  952          movl    8(%esp), %edx
 953  953          movl    4(%esp), %ecx
 954  954          movl    (%edx), %eax            /* predp->forw                  */
 955  955          movl    %edx, CPTRSIZE(%ecx)    /* entryp->back = predp         */
 956  956          movl    %eax, (%ecx)            /* entryp->forw = predp->forw   */
 957  957          movl    %ecx, (%edx)            /* predp->forw = entryp         */
 958  958          movl    %ecx, CPTRSIZE(%eax)    /* predp->forw->back = entryp   */
 959  959          ret
 960  960          SET_SIZE(_insque)
 961  961  
 962  962  #endif  /* __i386 */
 963  963  #endif  /* __lint */
 964  964  
 965  965  /*
 966  966   * Remove entryp from a doubly linked list
 967  967   */
 968  968  
 969  969  #if defined(__lint)
 970  970  
 971  971  /*ARGSUSED*/
 972  972  void
 973  973  _remque(caddr_t entryp)
 974  974  {}
 975  975  
 976  976  #else   /* __lint */
 977  977  
 978  978  #if defined(__amd64)
 979  979  
 980  980          ENTRY(_remque)
 981  981          movq    (%rdi), %rax            /* entry->forw */
 982  982          movq    CPTRSIZE(%rdi), %rdx    /* entry->back */
 983  983          movq    %rax, (%rdx)            /* entry->back->forw = entry->forw */
 984  984          movq    %rdx, CPTRSIZE(%rax)    /* entry->forw->back = entry->back */
 985  985          ret
 986  986          SET_SIZE(_remque)
 987  987  
 988  988  #elif defined(__i386)
 989  989  
 990  990          ENTRY(_remque)
 991  991          movl    4(%esp), %ecx
 992  992          movl    (%ecx), %eax            /* entry->forw */
 993  993          movl    CPTRSIZE(%ecx), %edx    /* entry->back */
 994  994          movl    %eax, (%edx)            /* entry->back->forw = entry->forw */
 995  995          movl    %edx, CPTRSIZE(%eax)    /* entry->forw->back = entry->back */
 996  996          ret
 997  997          SET_SIZE(_remque)
 998  998  
 999  999  #endif  /* __i386 */
1000 1000  #endif  /* __lint */
1001 1001  
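Both routines assume the forward link is the first pointer in the element and
the back link the second (at offset CPTRSIZE). A sketch with a hypothetical
element type:

	struct qelem {
		struct qelem	*q_forw;	/* must be first */
		struct qelem	*q_back;	/* must be second */
	} e, pred;

	_insque((caddr_t)&e, (caddr_t)&pred);	/* link e after pred */
	_remque((caddr_t)&e);			/* unlink e again */
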
1002 1002  /*
1003 1003   * Returns the number of
 1004 1004   * non-null bytes in the string argument.
1005 1005   */
1006 1006  
1007 1007  #if defined(__lint)
1008 1008  
1009 1009  /* ARGSUSED */
1010 1010  size_t
1011 1011  strlen(const char *str)
1012 1012  { return (0); }
1013 1013  
1014 1014  #else   /* __lint */
1015 1015  
1016 1016  #if defined(__amd64)
1017 1017  
1018 1018  /*
1019 1019   * This is close to a simple transliteration of a C version of this
1020 1020   * routine.  We should either just -make- this be a C version, or
1021 1021   * justify having it in assembler by making it significantly faster.
1022 1022   *
1023 1023   * size_t
1024 1024   * strlen(const char *s)
1025 1025   * {
1026 1026   *      const char *s0;
1027 1027   * #if defined(DEBUG)
1028 1028   *      if ((uintptr_t)s < KERNELBASE)
1029 1029   *              panic(.str_panic_msg);
1030 1030   * #endif
1031 1031   *      for (s0 = s; *s; s++)
1032 1032   *              ;
1033 1033   *      return (s - s0);
1034 1034   * }
1035 1035   */
1036 1036  
1037 1037          ENTRY(strlen)
1038 1038  #ifdef DEBUG
1039 1039          movq    postbootkernelbase(%rip), %rax
1040 1040          cmpq    %rax, %rdi
1041 1041          jae     str_valid
1042 1042          pushq   %rbp
1043 1043          movq    %rsp, %rbp
1044 1044          leaq    .str_panic_msg(%rip), %rdi
1045 1045          xorl    %eax, %eax
1046 1046          call    panic
1047 1047  #endif  /* DEBUG */
1048 1048  str_valid:
1049 1049          cmpb    $0, (%rdi)
1050 1050          movq    %rdi, %rax
1051 1051          je      .null_found
1052 1052          .align  4
1053 1053  .strlen_loop:
1054 1054          incq    %rdi
1055 1055          cmpb    $0, (%rdi)
1056 1056          jne     .strlen_loop
1057 1057  .null_found:
1058 1058          subq    %rax, %rdi
1059 1059          movq    %rdi, %rax
1060 1060          ret
1061 1061          SET_SIZE(strlen)
1062 1062  
1063 1063  #elif defined(__i386)
1064 1064  
1065 1065          ENTRY(strlen)
1066 1066  #ifdef DEBUG
1067 1067          movl    postbootkernelbase, %eax
1068 1068          cmpl    %eax, 4(%esp)
1069 1069          jae     str_valid
1070 1070          pushl   %ebp
1071 1071          movl    %esp, %ebp
1072 1072          pushl   $.str_panic_msg
1073 1073          call    panic
1074 1074  #endif /* DEBUG */
1075 1075  
1076 1076  str_valid:
1077 1077          movl    4(%esp), %eax           /* %eax = string address */
1078 1078          testl   $3, %eax                /* if %eax not word aligned */
1079 1079          jnz     .not_word_aligned       /* goto .not_word_aligned */
1080 1080          .align  4
1081 1081  .word_aligned:
1082 1082          movl    (%eax), %edx            /* move 1 word from (%eax) to %edx */
1083 1083          movl    $0x7f7f7f7f, %ecx
1084 1084          andl    %edx, %ecx              /* %ecx = %edx & 0x7f7f7f7f */
1085 1085          addl    $4, %eax                /* next word */
1086 1086          addl    $0x7f7f7f7f, %ecx       /* %ecx += 0x7f7f7f7f */
1087 1087          orl     %edx, %ecx              /* %ecx |= %edx */
1088 1088          andl    $0x80808080, %ecx       /* %ecx &= 0x80808080 */
1089 1089          cmpl    $0x80808080, %ecx       /* if no null byte in this word */
1090 1090          je      .word_aligned           /* goto .word_aligned */
1091 1091          subl    $4, %eax                /* post-incremented */
1092 1092  .not_word_aligned:
1093 1093          cmpb    $0, (%eax)              /* if a byte in (%eax) is null */
1094 1094          je      .null_found             /* goto .null_found */
1095 1095          incl    %eax                    /* next byte */
1096 1096          testl   $3, %eax                /* if %eax not word aligned */
1097 1097          jnz     .not_word_aligned       /* goto .not_word_aligned */
1098 1098          jmp     .word_aligned           /* goto .word_aligned */
1099 1099          .align  4
1100 1100  .null_found:
1101 1101          subl    4(%esp), %eax           /* %eax -= string address */
1102 1102          ret
1103 1103          SET_SIZE(strlen)
1104 1104  
1105 1105  #endif  /* __i386 */
1106 1106  
1107 1107  #ifdef DEBUG
1108 1108          .text
1109 1109  .str_panic_msg:
1110 1110          .string "strlen: argument below kernelbase"
1111 1111  #endif /* DEBUG */
1112 1112  
1113 1113  #endif  /* __lint */
1114 1114  
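The 32-bit loop above uses the classic word-at-a-time null-byte test. The
check it performs on each aligned word, restated in C:

	uint32_t w = *(uint32_t *)p;		/* p is word-aligned */
	uint32_t t = ((w & 0x7f7f7f7f) + 0x7f7f7f7f) | w;

	if ((t & 0x80808080) != 0x80808080) {
		/*
		 * Bit 7 of a byte of t is clear only when the
		 * corresponding byte of w is zero, so the
		 * terminating null is somewhere in this word.
		 */
	}
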
1115 1115          /*
1116 1116           * Berkeley 4.3 introduced symbolically named interrupt levels
 1117 1117           * as a way to deal with priority in a machine-independent fashion.
1118 1118           * Numbered priorities are machine specific, and should be
1119 1119           * discouraged where possible.
1120 1120           *
1121 1121           * Note, for the machine specific priorities there are
1122 1122           * examples listed for devices that use a particular priority.
1123 1123           * It should not be construed that all devices of that
 1124 1124           * type should be at that priority.  It is currently where
 1125 1125           * the current devices fit into the priority scheme, based
 1126 1126           * upon time criticality.
1127 1127           *
1128 1128           * The underlying assumption of these assignments is that
1129 1129           * IPL 10 is the highest level from which a device
1130 1130           * routine can call wakeup.  Devices that interrupt from higher
1131 1131           * levels are restricted in what they can do.  If they need
 1132 1132           * kernel services they should schedule a routine at a lower
1133 1133           * level (via software interrupt) to do the required
1134 1134           * processing.
1135 1135           *
1136 1136           * Examples of this higher usage:
1137 1137           *      Level   Usage
1138 1138           *      14      Profiling clock (and PROM uart polling clock)
1139 1139           *      12      Serial ports
1140 1140           *
1141 1141           * The serial ports request lower level processing on level 6.
1142 1142           *
1143 1143           * Also, almost all splN routines (where N is a number or a
1144 1144           * mnemonic) will do a RAISE(), on the assumption that they are
1145 1145           * never used to lower our priority.
1146 1146           * The exceptions are:
1147 1147           *      spl8()          Because you can't be above 15 to begin with!
1148 1148           *      splzs()         Because this is used at boot time to lower our
1149 1149           *                      priority, to allow the PROM to poll the uart.
1150 1150           *      spl0()          Used to lower priority to 0.
1151 1151           */
1152 1152  
1153 1153  #if defined(__lint)
1154 1154  
1155 1155  int spl0(void)          { return (0); }
1156 1156  int spl6(void)          { return (0); }
1157 1157  int spl7(void)          { return (0); }
1158 1158  int spl8(void)          { return (0); }
1159 1159  int splhigh(void)       { return (0); }
1160 1160  int splhi(void)         { return (0); }
1161 1161  int splzs(void)         { return (0); }
1162 1162  
1163 1163  /* ARGSUSED */
1164 1164  void
1165 1165  splx(int level)
1166 1166  {}
1167 1167  
1168 1168  #else   /* __lint */
1169 1169  
1170 1170  #if defined(__amd64)
1171 1171  
1172 1172  #define SETPRI(level) \
1173 1173          movl    $/**/level, %edi;       /* new priority */              \
1174 1174          jmp     do_splx                 /* redirect to do_splx */
1175 1175  
1176 1176  #define RAISE(level) \
1177 1177          movl    $/**/level, %edi;       /* new priority */              \
1178 1178          jmp     splr                    /* redirect to splr */
1179 1179  
1180 1180  #elif defined(__i386)
1181 1181  
1182 1182  #define SETPRI(level) \
1183 1183          pushl   $/**/level;     /* new priority */                      \
1184 1184          call    do_splx;        /* invoke common splx code */           \
1185 1185          addl    $4, %esp;       /* unstack arg */                       \
1186 1186          ret
1187 1187  
1188 1188  #define RAISE(level) \
1189 1189          pushl   $/**/level;     /* new priority */                      \
1190 1190          call    splr;           /* invoke common splr code */           \
1191 1191          addl    $4, %esp;       /* unstack args */                      \
1192 1192          ret
1193 1193  
1194 1194  #endif  /* __i386 */
1195 1195  
1196 1196          /* locks out all interrupts, including memory errors */
1197 1197          ENTRY(spl8)
1198 1198          SETPRI(15)
1199 1199          SET_SIZE(spl8)
1200 1200  
1201 1201          /* just below the level that profiling runs */
1202 1202          ENTRY(spl7)
1203 1203          RAISE(13)
1204 1204          SET_SIZE(spl7)
1205 1205  
1206 1206          /* sun specific - highest priority onboard serial i/o asy ports */
1207 1207          ENTRY(splzs)
1208 1208          SETPRI(12)      /* Can't be a RAISE, as it's used to lower us */
1209 1209          SET_SIZE(splzs)
1210 1210  
1211 1211          ENTRY(splhi)
1212 1212          ALTENTRY(splhigh)
1213 1213          ALTENTRY(spl6)
1214 1214          ALTENTRY(i_ddi_splhigh)
1215 1215  
1216 1216          RAISE(DISP_LEVEL)
1217 1217  
1218 1218          SET_SIZE(i_ddi_splhigh)
1219 1219          SET_SIZE(spl6)
1220 1220          SET_SIZE(splhigh)
1221 1221          SET_SIZE(splhi)
1222 1222  
1223 1223          /* allow all interrupts */
1224 1224          ENTRY(spl0)
1225 1225          SETPRI(0)
1226 1226          SET_SIZE(spl0)
1227 1227  
1228 1228  
1229 1229          /* splx implementation */
1230 1230          ENTRY(splx)
1231 1231          jmp     do_splx         /* redirect to common splx code */
1232 1232          SET_SIZE(splx)
1233 1233  
1234 1234  #endif  /* __lint */
1235 1235  
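The usual pairing, sketched in C: the spl routines return the previous
priority so splx() can restore it:

	int s = splhi();	/* raise to DISP_LEVEL, save old priority */
	/* ... critical section ... */
	splx(s);		/* restore the saved priority */
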
1236 1236  #if defined(__i386)
1237 1237  
1238 1238  /*
1239 1239   * Read and write the %gs register
1240 1240   */
1241 1241  
1242 1242  #if defined(__lint)
1243 1243  
1244 1244  /*ARGSUSED*/
1245 1245  uint16_t
1246 1246  getgs(void)
1247 1247  { return (0); }
1248 1248  
1249 1249  /*ARGSUSED*/
1250 1250  void
1251 1251  setgs(uint16_t sel)
1252 1252  {}
1253 1253  
1254 1254  #else   /* __lint */
1255 1255  
1256 1256          ENTRY(getgs)
1257 1257          clr     %eax
1258 1258          movw    %gs, %ax
1259 1259          ret
1260 1260          SET_SIZE(getgs)
1261 1261  
1262 1262          ENTRY(setgs)
1263 1263          movw    4(%esp), %gs
1264 1264          ret
1265 1265          SET_SIZE(setgs)
1266 1266  
1267 1267  #endif  /* __lint */
1268 1268  #endif  /* __i386 */
1269 1269  
1270 1270  #if defined(__lint)
1271 1271  
1272 1272  void
1273 1273  pc_reset(void)
1274 1274  {}
1275 1275  
1276 1276  void
1277 1277  efi_reset(void)
1278 1278  {}
1279 1279  
1280 1280  #else   /* __lint */
1281 1281  
1282 1282          ENTRY(wait_500ms)
1283 1283  #if defined(__amd64)
1284 1284          pushq   %rbx
1285 1285  #elif defined(__i386)
1286 1286          push    %ebx
1287 1287  #endif
1288 1288          movl    $50000, %ebx
1289 1289  1:
1290 1290          call    tenmicrosec
1291 1291          decl    %ebx
1292 1292          jnz     1b
1293 1293  #if defined(__amd64)
1294 1294          popq    %rbx
1295 1295  #elif defined(__i386)
1296 1296          pop     %ebx
1297 1297  #endif
1298 1298          ret     
1299 1299          SET_SIZE(wait_500ms)
1300 1300  
1301 1301  #define RESET_METHOD_KBC        1
1302 1302  #define RESET_METHOD_PORT92     2
1303 1303  #define RESET_METHOD_PCI        4
1304 1304  
1305 1305          DGDEF3(pc_reset_methods, 4, 8)
1306 1306          .long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;
1307 1307  
1308 1308          ENTRY(pc_reset)
1309 1309  
1310 1310  #if defined(__i386)
1311 1311          testl   $RESET_METHOD_KBC, pc_reset_methods
1312 1312  #elif defined(__amd64)
1313 1313          testl   $RESET_METHOD_KBC, pc_reset_methods(%rip)
1314 1314  #endif
1315 1315          jz      1f
1316 1316  
1317 1317          /
1318 1318          / Try the classic keyboard controller-triggered reset.
1319 1319          /
1320 1320          movw    $0x64, %dx
1321 1321          movb    $0xfe, %al
1322 1322          outb    (%dx)
1323 1323  
1324 1324          / Wait up to 500 milliseconds here for the keyboard controller
1325 1325          / to pull the reset line.  On some systems where the keyboard
1326 1326          / controller is slow to pull the reset line, the next reset method
1327 1327          / may be executed (which may be bad if those systems hang when the
1328 1328          / next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
1329 1329          / and Ferrari 4000 (doesn't like the cf9 reset method))
1330 1330  
1331 1331          call    wait_500ms
1332 1332  
1333 1333  1:
1334 1334  #if defined(__i386)
1335 1335          testl   $RESET_METHOD_PORT92, pc_reset_methods
1336 1336  #elif defined(__amd64)
1337 1337          testl   $RESET_METHOD_PORT92, pc_reset_methods(%rip)
1338 1338  #endif
1339 1339          jz      3f
1340 1340  
1341 1341          /
1342 1342          / Try port 0x92 fast reset
1343 1343          /
1344 1344          movw    $0x92, %dx
1345 1345          inb     (%dx)
1346 1346          cmpb    $0xff, %al      / If port's not there, we should get back 0xFF
1347 1347          je      1f
1348 1348          testb   $1, %al         / If bit 0
1349 1349          jz      2f              / is clear, jump to perform the reset
1350 1350          andb    $0xfe, %al      / otherwise,
1351 1351          outb    (%dx)           / clear bit 0 first, then
1352 1352  2:
1353 1353          orb     $1, %al         / Set bit 0
1354 1354          outb    (%dx)           / and reset the system
1355 1355  1:
1356 1356  
1357 1357          call    wait_500ms
1358 1358  
1359 1359  3:
1360 1360  #if defined(__i386)
1361 1361          testl   $RESET_METHOD_PCI, pc_reset_methods
1362 1362  #elif defined(__amd64)
1363 1363          testl   $RESET_METHOD_PCI, pc_reset_methods(%rip)
1364 1364  #endif
1365 1365          jz      4f
1366 1366  
1367 1367          / Try the PCI (soft) reset vector (should work on all modern systems,
1368 1368          / but has been shown to cause problems on 450NX systems, and some newer
1369 1369          / systems (e.g. ATI IXP400-equipped systems))
1370 1370          / When resetting via this method, 2 writes are required.  The first
1371 1371          / targets bit 1 (0=hard reset without power cycle, 1=hard reset with
1372 1372          / power cycle).
1373 1373          / The reset occurs on the second write, during bit 2's transition from
1374 1374          / 0->1.
1375 1375          movw    $0xcf9, %dx
1376 1376          movb    $0x2, %al       / Reset mode = hard, no power cycle
1377 1377          outb    (%dx)
1378 1378          movb    $0x6, %al
1379 1379          outb    (%dx)
1380 1380  
1381 1381          call    wait_500ms
1382 1382  
1383 1383  4:
1384 1384          /
1385 1385          / port 0xcf9 failed also.  Last-ditch effort is to
1386 1386          / triple-fault the CPU.
1387 1387          / Also, use triple fault for EFI firmware
1388 1388          /
1389 1389          ENTRY(efi_reset)
1390 1390  #if defined(__amd64)
1391 1391          pushq   $0x0
1392 1392          pushq   $0x0            / IDT base of 0, limit of 0 + 2 unused bytes
1393 1393          lidt    (%rsp)
1394 1394  #elif defined(__i386)
1395 1395          pushl   $0x0
1396 1396          pushl   $0x0            / IDT base of 0, limit of 0 + 2 unused bytes
1397 1397          lidt    (%esp)
1398 1398  #endif
1399 1399          int     $0x0            / Trigger interrupt, generate triple-fault
1400 1400  
1401 1401          cli
1402 1402          hlt                     / Wait forever
1403 1403          /*NOTREACHED*/
1404 1404          SET_SIZE(efi_reset)
1405 1405          SET_SIZE(pc_reset)
1406 1406  
1407 1407  #endif  /* __lint */
1408 1408  
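The port-0x92 sequence in pc_reset above, restated with the C-callable
inb/outb routines defined below; per the comments, the reset fires on bit 0's
0->1 transition:

	uint8_t v = inb(0x92);

	if (v != 0xff) {			/* 0xff: port not implemented */
		if (v & 1)
			outb(0x92, v & 0xfe);	/* clear bit 0 first */
		outb(0x92, v | 1);		/* 0->1 transition resets */
	}
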
1409 1409  /*
1410 1410   * C callable in and out routines
1411 1411   */
1412 1412  
1413 1413  #if defined(__lint)
1414 1414  
1415 1415  /* ARGSUSED */
1416 1416  void
1417 1417  outl(int port_address, uint32_t val)
1418 1418  {}
1419 1419  
1420 1420  #else   /* __lint */
1421 1421  
1422 1422  #if defined(__amd64)
1423 1423  
1424 1424          ENTRY(outl)
1425 1425          movw    %di, %dx
1426 1426          movl    %esi, %eax
1427 1427          outl    (%dx)
1428 1428          ret
1429 1429          SET_SIZE(outl)
1430 1430  
1431 1431  #elif defined(__i386)
1432 1432  
1433 1433          .set    PORT, 4
1434 1434          .set    VAL, 8
1435 1435  
1436 1436          ENTRY(outl)
1437 1437          movw    PORT(%esp), %dx
1438 1438          movl    VAL(%esp), %eax
1439 1439          outl    (%dx)
1440 1440          ret
1441 1441          SET_SIZE(outl)
1442 1442  
1443 1443  #endif  /* __i386 */
1444 1444  #endif  /* __lint */
1445 1445  
1446 1446  #if defined(__lint)
1447 1447  
1448 1448  /* ARGSUSED */
1449 1449  void
1450 1450  outw(int port_address, uint16_t val)
1451 1451  {}
1452 1452  
1453 1453  #else   /* __lint */
1454 1454  
1455 1455  #if defined(__amd64)
1456 1456  
1457 1457          ENTRY(outw)
1458 1458          movw    %di, %dx
1459 1459          movw    %si, %ax
1460 1460          D16 outl (%dx)          /* XX64 why not outw? */
1461 1461          ret
1462 1462          SET_SIZE(outw)
1463 1463  
1464 1464  #elif defined(__i386)
1465 1465  
1466 1466          ENTRY(outw)
1467 1467          movw    PORT(%esp), %dx
1468 1468          movw    VAL(%esp), %ax
1469 1469          D16 outl (%dx)
1470 1470          ret
1471 1471          SET_SIZE(outw)
1472 1472  
1473 1473  #endif  /* __i386 */
1474 1474  #endif  /* __lint */
1475 1475  
1476 1476  #if defined(__lint)
1477 1477  
1478 1478  /* ARGSUSED */
1479 1479  void
1480 1480  outb(int port_address, uint8_t val)
1481 1481  {}
1482 1482  
1483 1483  #else   /* __lint */
1484 1484  
1485 1485  #if defined(__amd64)
1486 1486  
1487 1487          ENTRY(outb)
1488 1488          movw    %di, %dx
1489 1489          movb    %sil, %al
1490 1490          outb    (%dx)
1491 1491          ret
1492 1492          SET_SIZE(outb)
1493 1493  
1494 1494  #elif defined(__i386)
1495 1495  
1496 1496          ENTRY(outb)
1497 1497          movw    PORT(%esp), %dx
1498 1498          movb    VAL(%esp), %al
1499 1499          outb    (%dx)
1500 1500          ret
1501 1501          SET_SIZE(outb)
1502 1502  
1503 1503  #endif  /* __i386 */
1504 1504  #endif  /* __lint */
1505 1505  
1506 1506  #if defined(__lint)
1507 1507  
1508 1508  /* ARGSUSED */
1509 1509  uint32_t
1510 1510  inl(int port_address)
1511 1511  { return (0); }
1512 1512  
1513 1513  #else   /* __lint */
1514 1514  
1515 1515  #if defined(__amd64)
1516 1516  
1517 1517          ENTRY(inl)
1518 1518          xorl    %eax, %eax
1519 1519          movw    %di, %dx
1520 1520          inl     (%dx)
1521 1521          ret
1522 1522          SET_SIZE(inl)
1523 1523  
1524 1524  #elif defined(__i386)
1525 1525  
1526 1526          ENTRY(inl)
1527 1527          movw    PORT(%esp), %dx
1528 1528          inl     (%dx)
1529 1529          ret
1530 1530          SET_SIZE(inl)
1531 1531  
1532 1532  #endif  /* __i386 */
1533 1533  #endif  /* __lint */
1534 1534  
1535 1535  #if defined(__lint)
1536 1536  
1537 1537  /* ARGSUSED */
1538 1538  uint16_t
1539 1539  inw(int port_address)
1540 1540  { return (0); }
1541 1541  
1542 1542  #else   /* __lint */
1543 1543  
1544 1544  #if defined(__amd64)
1545 1545  
1546 1546          ENTRY(inw)
1547 1547          xorl    %eax, %eax
1548 1548          movw    %di, %dx
1549 1549          D16 inl (%dx)
1550 1550          ret
1551 1551          SET_SIZE(inw)
1552 1552  
1553 1553  #elif defined(__i386)
1554 1554  
1555 1555          ENTRY(inw)
1556 1556          subl    %eax, %eax
1557 1557          movw    PORT(%esp), %dx
1558 1558          D16 inl (%dx)
1559 1559          ret
1560 1560          SET_SIZE(inw)
1561 1561  
1562 1562  #endif  /* __i386 */
1563 1563  #endif  /* __lint */
1564 1564  
1565 1565  
1566 1566  #if defined(__lint)
1567 1567  
1568 1568  /* ARGSUSED */
1569 1569  uint8_t
1570 1570  inb(int port_address)
1571 1571  { return (0); }
1572 1572  
1573 1573  #else   /* __lint */
1574 1574  
1575 1575  #if defined(__amd64)
1576 1576  
1577 1577          ENTRY(inb)
1578 1578          xorl    %eax, %eax
1579 1579          movw    %di, %dx
1580 1580          inb     (%dx)
1581 1581          ret
1582 1582          SET_SIZE(inb)
1583 1583  
1584 1584  #elif defined(__i386)
1585 1585  
1586 1586          ENTRY(inb)
1587 1587          subl    %eax, %eax
1588 1588          movw    PORT(%esp), %dx
1589 1589          inb     (%dx)
1590 1590          ret
1591 1591          SET_SIZE(inb)
1592 1592  
1593 1593  #endif  /* __i386 */
1594 1594  #endif  /* __lint */
1595 1595  
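A hedged example of the byte-wide pair; the CMOS/RTC index and data ports
(0x70/0x71 on PC hardware) are illustrative:

	outb(0x70, 0x0a);		/* select CMOS status register A */
	uint8_t rega = inb(0x71);	/* read the selected register */
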
1596 1596  
1597 1597  #if defined(__lint)
1598 1598  
1599 1599  /* ARGSUSED */
1600 1600  void
1601 1601  repoutsw(int port, uint16_t *addr, int cnt)
1602 1602  {}
1603 1603  
1604 1604  #else   /* __lint */
1605 1605  
1606 1606  #if defined(__amd64)
1607 1607  
1608 1608          ENTRY(repoutsw)
1609 1609          movl    %edx, %ecx
1610 1610          movw    %di, %dx
1611 1611          rep
1612 1612            D16 outsl
1613 1613          ret
1614 1614          SET_SIZE(repoutsw)
1615 1615  
1616 1616  #elif defined(__i386)
1617 1617  
1618 1618          /*
1619 1619           * The arguments and saved registers are on the stack in the
1620 1620           *  following order:
1621 1621           *      |  cnt  |  +16
1622 1622           *      | *addr |  +12
1623 1623           *      | port  |  +8
1624 1624           *      |  eip  |  +4
1625 1625           *      |  esi  |  <-- %esp
1626 1626           * If additional values are pushed onto the stack, make sure
1627 1627           * to adjust the following constants accordingly.
1628 1628           */
1629 1629          .set    PORT, 8
1630 1630          .set    ADDR, 12
1631 1631          .set    COUNT, 16
1632 1632  
1633 1633          ENTRY(repoutsw)
1634 1634          pushl   %esi
1635 1635          movl    PORT(%esp), %edx
1636 1636          movl    ADDR(%esp), %esi
1637 1637          movl    COUNT(%esp), %ecx
1638 1638          rep
1639 1639            D16 outsl
1640 1640          popl    %esi
1641 1641          ret
1642 1642          SET_SIZE(repoutsw)
1643 1643  
1644 1644  #endif  /* __i386 */
1645 1645  #endif  /* __lint */
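
/*
 * Functionally, repoutsw() matches the C sketch below: one 16-bit port
 * write per halfword.  This is a sketch only; the routine above uses a
 * single "rep outsw" (D16 outsl), and outw() is assumed to be the port
 * output routine declared alongside outb() in <sys/archsystm.h>.
 */
void
repoutsw_sketch(int port, uint16_t *addr, int cnt)
{
        while (cnt-- > 0)
                outw(port, *addr++);
}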
1646 1646  
1647 1647  
1648 1648  #if defined(__lint)
1649 1649  
1650 1650  /* ARGSUSED */
1651 1651  void
1652 1652  repinsw(int port_addr, uint16_t *addr, int cnt)
1653 1653  {}
1654 1654  
1655 1655  #else   /* __lint */
1656 1656  
1657 1657  #if defined(__amd64)
1658 1658  
1659 1659          ENTRY(repinsw)
1660 1660          movl    %edx, %ecx
1661 1661          movw    %di, %dx
1662 1662          rep
1663 1663            D16 insl
1664 1664          ret
1665 1665          SET_SIZE(repinsw)
1666 1666  
1667 1667  #elif defined(__i386)
1668 1668  
1669 1669          ENTRY(repinsw)
1670 1670          pushl   %edi
1671 1671          movl    PORT(%esp), %edx
1672 1672          movl    ADDR(%esp), %edi
1673 1673          movl    COUNT(%esp), %ecx
1674 1674          rep
1675 1675            D16 insl
1676 1676          popl    %edi
1677 1677          ret
1678 1678          SET_SIZE(repinsw)
1679 1679  
1680 1680  #endif  /* __i386 */
1681 1681  #endif  /* __lint */
1682 1682  
1683 1683  
1684 1684  #if defined(__lint)
1685 1685  
1686 1686  /* ARGSUSED */
1687 1687  void
1688 1688  repinsb(int port, uint8_t *addr, int count)
1689 1689  {}
1690 1690  
1691 1691  #else   /* __lint */
1692 1692  
1693 1693  #if defined(__amd64)
1694 1694  
1695 1695          ENTRY(repinsb)
1696 1696          movl    %edx, %ecx      
1697 1697          movw    %di, %dx
1698 1698          movq    %rsi, %rdi
1699 1699          rep
1700 1700            insb
1701 1701          ret             
1702 1702          SET_SIZE(repinsb)
1703 1703  
1704 1704  #elif defined(__i386)
1705 1705          
1706 1706          /*
1707 1707           * The arguments and saved registers are on the stack in the
1708 1708           *  following order:
1709 1709           *      |  cnt  |  +16
1710 1710           *      | *addr |  +12
1711 1711           *      | port  |  +8
1712 1712           *      |  eip  |  +4
1713 1713           *      |  esi  |  <-- %esp
1714 1714           * If additional values are pushed onto the stack, make sure
1715 1715           * to adjust the following constants accordingly.
1716 1716           */
1717 1717          .set    IO_PORT, 8
1718 1718          .set    IO_ADDR, 12
1719 1719          .set    IO_COUNT, 16
1720 1720  
1721 1721          ENTRY(repinsb)
1722 1722          pushl   %edi
1723 1723          movl    IO_ADDR(%esp), %edi
1724 1724          movl    IO_COUNT(%esp), %ecx
1725 1725          movl    IO_PORT(%esp), %edx
1726 1726          rep
1727 1727            insb
1728 1728          popl    %edi
1729 1729          ret
1730 1730          SET_SIZE(repinsb)
1731 1731  
1732 1732  #endif  /* __i386 */
1733 1733  #endif  /* __lint */
1734 1734  
1735 1735  
1736 1736  /*
1737 1737   * Input a stream of 32-bit words.
1738 1738   * NOTE: count is a DWORD count.
1739 1739   */
1740 1740  #if defined(__lint)
1741 1741  
1742 1742  /* ARGSUSED */
1743 1743  void
1744 1744  repinsd(int port, uint32_t *addr, int count)
1745 1745  {}
1746 1746  
1747 1747  #else   /* __lint */
1748 1748  
1749 1749  #if defined(__amd64)
1750 1750          
1751 1751          ENTRY(repinsd)
1752 1752          movl    %edx, %ecx
1753 1753          movw    %di, %dx
1754 1754          movq    %rsi, %rdi
1755 1755          rep
1756 1756            insl
1757 1757          ret
1758 1758          SET_SIZE(repinsd)
1759 1759  
1760 1760  #elif defined(__i386)
1761 1761  
1762 1762          ENTRY(repinsd)
1763 1763          pushl   %edi
1764 1764          movl    IO_ADDR(%esp), %edi
1765 1765          movl    IO_COUNT(%esp), %ecx
1766 1766          movl    IO_PORT(%esp), %edx
1767 1767          rep
1768 1768            insl
1769 1769          popl    %edi
1770 1770          ret
1771 1771          SET_SIZE(repinsd)
1772 1772  
1773 1773  #endif  /* __i386 */
1774 1774  #endif  /* __lint */
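
/*
 * The input direction in the same C terms (a sketch; the assembly
 * above performs the whole transfer with a single "rep insl").  inl()
 * is the routine defined earlier in this file.
 */
void
repinsd_sketch(int port, uint32_t *addr, int count)
{
        while (count-- > 0)
                *addr++ = inl(port);
}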
1775 1775  
1776 1776  /*
1777 1777   * Output a stream of bytes
1778 1778   * NOTE: count is a byte count
1779 1779   */
1780 1780  #if defined(__lint)
1781 1781  
1782 1782  /* ARGSUSED */
1783 1783  void
1784 1784  repoutsb(int port, uint8_t *addr, int count)
1785 1785  {}
1786 1786  
1787 1787  #else   /* __lint */
1788 1788  
1789 1789  #if defined(__amd64)
1790 1790  
1791 1791          ENTRY(repoutsb)
1792 1792          movl    %edx, %ecx
1793 1793          movw    %di, %dx
1794 1794          rep
1795 1795            outsb
1796 1796          ret     
1797 1797          SET_SIZE(repoutsb)
1798 1798  
1799 1799  #elif defined(__i386)
1800 1800  
1801 1801          ENTRY(repoutsb)
1802 1802          pushl   %esi
1803 1803          movl    IO_ADDR(%esp), %esi
1804 1804          movl    IO_COUNT(%esp), %ecx
1805 1805          movl    IO_PORT(%esp), %edx
1806 1806          rep
1807 1807            outsb
1808 1808          popl    %esi
1809 1809          ret
1810 1810          SET_SIZE(repoutsb)
1811 1811  
1812 1812  #endif  /* __i386 */    
1813 1813  #endif  /* __lint */
1814 1814  
1815 1815  /*
1816 1816   * Output a stream of 32-bit words
1817 1817   * NOTE: count is a DWORD count
1818 1818   */
1819 1819  #if defined(__lint)
1820 1820  
1821 1821  /* ARGSUSED */
1822 1822  void
1823 1823  repoutsd(int port, uint32_t *addr, int count)
1824 1824  {}
1825 1825  
1826 1826  #else   /* __lint */
1827 1827  
1828 1828  #if defined(__amd64)
1829 1829  
1830 1830          ENTRY(repoutsd)
1831 1831          movl    %edx, %ecx
1832 1832          movw    %di, %dx
1833 1833          rep
1834 1834            outsl
1835 1835          ret     
1836 1836          SET_SIZE(repoutsd)
1837 1837  
1838 1838  #elif defined(__i386)
1839 1839  
1840 1840          ENTRY(repoutsd)
1841 1841          pushl   %esi
1842 1842          movl    IO_ADDR(%esp), %esi
1843 1843          movl    IO_COUNT(%esp), %ecx
1844 1844          movl    IO_PORT(%esp), %edx
1845 1845          rep
1846 1846            outsl
1847 1847          popl    %esi
1848 1848          ret
1849 1849          SET_SIZE(repoutsd)
1850 1850  
1851 1851  #endif  /* __i386 */
1852 1852  #endif  /* __lint */
1853 1853  
1854 1854  /*
1855 1855   * void int3(void)
1856 1856   * void int18(void)
1857 1857   * void int20(void)
1858 1858   * void int_cmci(void)
1859 1859   */
1860 1860  
1861 1861  #if defined(__lint)
1862 1862  
1863 1863  void
1864 1864  int3(void)
1865 1865  {}
1866 1866  
1867 1867  void
1868 1868  int18(void)
1869 1869  {}
1870 1870  
1871 1871  void
1872 1872  int20(void)
1873 1873  {}
1874 1874  
1875 1875  void
1876 1876  int_cmci(void)
1877 1877  {}
1878 1878  
1879 1879  #else   /* __lint */
1880 1880  
1881 1881          ENTRY(int3)
1882 1882          int     $T_BPTFLT
1883 1883          ret
1884 1884          SET_SIZE(int3)
1885 1885  
1886 1886          ENTRY(int18)
1887 1887          int     $T_MCE
1888 1888          ret
1889 1889          SET_SIZE(int18)
1890 1890  
1891 1891          ENTRY(int20)
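        /*
         * int20() enters the kernel debugger via T_DBGENTR, but only
         * when the debugger was requested at boot (boothowto has
         * RB_DEBUG set); otherwise it returns without trapping.
         */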
1892 1892          movl    boothowto, %eax
1893 1893          andl    $RB_DEBUG, %eax
1894 1894          jz      1f
1895 1895  
1896 1896          int     $T_DBGENTR
1897 1897  1:
1898 1898          rep;    ret     /* use 2 byte return instruction when branch target */
1899 1899                          /* AMD Software Optimization Guide - Section 6.2 */
1900 1900          SET_SIZE(int20)
1901 1901  
1902 1902          ENTRY(int_cmci)
1903 1903          int     $T_ENOEXTFLT
1904 1904          ret
1905 1905          SET_SIZE(int_cmci)
1906 1906  
1907 1907  #endif  /* __lint */
1908 1908  
1909 1909  #if defined(__lint)
1910 1910  
1911 1911  /* ARGSUSED */
1912 1912  int
1913 1913  scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
1914 1914  { return (0); }
1915 1915  
1916 1916  #else   /* __lint */
1917 1917  
1918 1918  #if defined(__amd64)
1919 1919  
1920 1920          ENTRY(scanc)
1921 1921                                          /* rdi == size */
1922 1922                                          /* rsi == cp */
1923 1923                                          /* rdx == table */
1924 1924                                          /* rcx == mask */
1925 1925          addq    %rsi, %rdi              /* end = &cp[size] */
1926 1926  .scanloop:
1927 1927          cmpq    %rdi, %rsi              /* while (cp < end */
1928 1928          jnb     .scandone
1929 1929          movzbq  (%rsi), %r8             /* %r8 = *cp */
1930 1930          incq    %rsi                    /* cp++ */
1931 1931          testb   %cl, (%r8, %rdx)
1932 1932          jz      .scanloop               /*  && (table[*cp] & mask) == 0) */
1933 1933          decq    %rsi                    /* (fix post-increment) */
1934 1934  .scandone:
1935 1935          movl    %edi, %eax
1936 1936          subl    %esi, %eax              /* return (end - cp) */
1937 1937          ret
1938 1938          SET_SIZE(scanc)
1939 1939  
1940 1940  #elif defined(__i386)
1941 1941  
1942 1942          ENTRY(scanc)
1943 1943          pushl   %edi
1944 1944          pushl   %esi
1945 1945          movb    24(%esp), %cl           /* mask = %cl */
1946 1946          movl    16(%esp), %esi          /* cp = %esi */
1947 1947          movl    20(%esp), %edx          /* table = %edx */
1948 1948          movl    %esi, %edi
1949 1949          addl    12(%esp), %edi          /* end = &cp[size]; */
1950 1950  .scanloop:
1951 1951          cmpl    %edi, %esi              /* while (cp < end */
1952 1952          jnb     .scandone
1953 1953          movzbl  (%esi),  %eax           /* %al = *cp */
1954 1954          incl    %esi                    /* cp++ */
1955 1955          movb    (%edx,  %eax), %al      /* %al = table[*cp] */
1956 1956          testb   %al, %cl
1957 1957          jz      .scanloop               /*   && (table[*cp] & mask) == 0) */
1958 1958          dec     %esi                    /* post-incremented */
1959 1959  .scandone:
1960 1960          movl    %edi, %eax
1961 1961          subl    %esi, %eax              /* return (end - cp) */
1962 1962          popl    %esi
1963 1963          popl    %edi
1964 1964          ret
1965 1965          SET_SIZE(scanc)
1966 1966  
1967 1967  #endif  /* __i386 */
1968 1968  #endif  /* __lint */
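
/*
 * Both implementations compute the loop spelled out in their inline
 * comments; as a self-contained C sketch:
 */
int
scanc_sketch(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
{
        uchar_t *end = &cp[size];

        while (cp < end && (table[*cp] & mask) == 0)
                cp++;
        return ((int)(end - cp));
}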
1969 1969  
1970 1970  /*
1971 1971   * Replacement functions for ones that are normally inlined.
1972 1972   * In addition to the copy in i86.il, they are defined here just in case.
1973 1973   */
1974 1974  
1975 1975  #if defined(__lint)
1976 1976  
1977 1977  ulong_t
1978 1978  intr_clear(void)
1979 1979  { return (0); }
1980 1980  
1981 1981  ulong_t
1982 1982  clear_int_flag(void)
1983 1983  { return (0); }
1984 1984  
1985 1985  #else   /* __lint */
1986 1986  
1987 1987  #if defined(__amd64)
1988 1988  
1989 1989          ENTRY(intr_clear)
1990 1990          ENTRY(clear_int_flag)
1991 1991          pushfq
1992 1992          popq    %rax
1993 1993  #if defined(__xpv)
1994 1994          leaq    xpv_panicking, %rdi
1995 1995          movl    (%rdi), %edi
1996 1996          cmpl    $0, %edi
1997 1997          jne     2f
1998 1998          CLIRET(%rdi, %dl)       /* returns event mask in %dl */
1999 1999          /*
2000 2000           * Synthesize the PS_IE bit from the event mask bit
2001 2001           */
2002 2002          andq    $_BITNOT(PS_IE), %rax
2003 2003          testb   $1, %dl
2004 2004          jnz     1f
2005 2005          orq     $PS_IE, %rax
2006 2006  1:
2007 2007          ret
2008 2008  2:
2009 2009  #endif
2010 2010          CLI(%rdi)
2011 2011          ret
2012 2012          SET_SIZE(clear_int_flag)
2013 2013          SET_SIZE(intr_clear)
2014 2014  
2015 2015  #elif defined(__i386)
2016 2016  
2017 2017          ENTRY(intr_clear)
2018 2018          ENTRY(clear_int_flag)
2019 2019          pushfl
2020 2020          popl    %eax
2021 2021  #if defined(__xpv)
2022 2022          leal    xpv_panicking, %edx
2023 2023          movl    (%edx), %edx
2024 2024          cmpl    $0, %edx
2025 2025          jne     2f
2026 2026          CLIRET(%edx, %cl)       /* returns event mask in %cl */
2027 2027          /*
2028 2028           * Synthesize the PS_IE bit from the event mask bit
2029 2029           */
2030 2030          andl    $_BITNOT(PS_IE), %eax
2031 2031          testb   $1, %cl
2032 2032          jnz     1f
2033 2033          orl     $PS_IE, %eax
2034 2034  1:
2035 2035          ret
2036 2036  2:
2037 2037  #endif
2038 2038          CLI(%edx)
2039 2039          ret
2040 2040          SET_SIZE(clear_int_flag)
2041 2041          SET_SIZE(intr_clear)
2042 2042  
2043 2043  #endif  /* __i386 */
2044 2044  #endif  /* __lint */
2045 2045  
2046 2046  #if defined(__lint)
2047 2047  
2048 2048  struct cpu *
2049 2049  curcpup(void)
2050 2050  { return (0); }
2051 2051  
2052 2052  #else   /* __lint */
2053 2053  
2054 2054  #if defined(__amd64)
2055 2055  
2056 2056          ENTRY(curcpup)
2057 2057          movq    %gs:CPU_SELF, %rax
2058 2058          ret
2059 2059          SET_SIZE(curcpup)
2060 2060  
2061 2061  #elif defined(__i386)
2062 2062  
2063 2063          ENTRY(curcpup)
2064 2064          movl    %gs:CPU_SELF, %eax
2065 2065          ret
2066 2066          SET_SIZE(curcpup)
2067 2067  
2068 2068  #endif  /* __i386 */
2069 2069  #endif  /* __lint */
2070 2070  
2071 2071  /* htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
2072 2072   * These functions reverse the byte order of the input parameter and return
2073 2073   * the result.  This converts between host byte order (little endian) and
2074 2074   * network byte order (big endian).
2075 2075   */
2076 2076  
2077 2077  #if defined(__lint)
2078 2078  
2079 2079  uint64_t
2080 2080  htonll(uint64_t i)
2081 2081  { return (i); }
2082 2082  
2083 2083  uint64_t
2084 2084  ntohll(uint64_t i)
2085 2085  { return (i); }
2086 2086  
2087 2087  uint32_t
2088 2088  htonl(uint32_t i)
2089 2089  { return (i); }
2090 2090  
2091 2091  uint32_t
2092 2092  ntohl(uint32_t i)
2093 2093  { return (i); }
2094 2094  
2095 2095  uint16_t
2096 2096  htons(uint16_t i)
2097 2097  { return (i); }
2098 2098  
2099 2099  uint16_t
2100 2100  ntohs(uint16_t i)
2101 2101  { return (i); }
2102 2102  
2103 2103  #else   /* __lint */
2104 2104  
2105 2105  #if defined(__amd64)
2106 2106  
2107 2107          ENTRY(htonll)
2108 2108          ALTENTRY(ntohll)
2109 2109          movq    %rdi, %rax
2110 2110          bswapq  %rax
2111 2111          ret
2112 2112          SET_SIZE(ntohll)
2113 2113          SET_SIZE(htonll)
2114 2114  
2115 2115          /* XX64 there must be shorter sequences for this */
2116 2116          ENTRY(htonl)
2117 2117          ALTENTRY(ntohl)
2118 2118          movl    %edi, %eax
2119 2119          bswap   %eax
2120 2120          ret
2121 2121          SET_SIZE(ntohl)
2122 2122          SET_SIZE(htonl)
2123 2123  
2124 2124          /* XX64 there must be better sequences for this */
2125 2125          ENTRY(htons)
2126 2126          ALTENTRY(ntohs)
2127 2127          movl    %edi, %eax
2128 2128          bswap   %eax
2129 2129          shrl    $16, %eax
2130 2130          ret
2131 2131          SET_SIZE(ntohs)
2132 2132          SET_SIZE(htons)
2133 2133  
2134 2134  #elif defined(__i386)
2135 2135  
2136 2136          ENTRY(htonll)
2137 2137          ALTENTRY(ntohll)
2138 2138          movl    4(%esp), %edx
2139 2139          movl    8(%esp), %eax
2140 2140          bswap   %edx
2141 2141          bswap   %eax
2142 2142          ret
2143 2143          SET_SIZE(ntohll)
2144 2144          SET_SIZE(htonll)
2145 2145  
2146 2146          ENTRY(htonl)
2147 2147          ALTENTRY(ntohl)
2148 2148          movl    4(%esp), %eax
2149 2149          bswap   %eax
2150 2150          ret
2151 2151          SET_SIZE(ntohl)
2152 2152          SET_SIZE(htonl)
2153 2153  
2154 2154          ENTRY(htons)
2155 2155          ALTENTRY(ntohs)
2156 2156          movl    4(%esp), %eax
2157 2157          bswap   %eax
2158 2158          shrl    $16, %eax
2159 2159          ret
2160 2160          SET_SIZE(ntohs)
2161 2161          SET_SIZE(htons)
2162 2162  
2163 2163  #endif  /* __i386 */
2164 2164  #endif  /* __lint */
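
/*
 * On a little-endian machine, htonll() is equivalent to byte-swapping
 * each 32-bit half and exchanging the halves, as in this C sketch
 * built on htonl() from this file:
 */
uint64_t
htonll_sketch(uint64_t i)
{
        return (((uint64_t)htonl((uint32_t)i) << 32) |
            htonl((uint32_t)(i >> 32)));
}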
2165 2165  
2166 2166  
2167 2167  #if defined(__lint)
2168 2168  
2169 2169  /* ARGSUSED */
2170 2170  void
2171 2171  intr_restore(ulong_t i)
2172 2172  { return; }
2173 2173  
2174 2174  /* ARGSUSED */
2175 2175  void
2176 2176  restore_int_flag(ulong_t i)
2177 2177  { return; }
2178 2178  
2179 2179  #else   /* __lint */
2180 2180  
2181 2181  #if defined(__amd64)
2182 2182  
2183 2183          ENTRY(intr_restore)
2184 2184          ENTRY(restore_int_flag)
2185 2185          testq   $PS_IE, %rdi
2186 2186          jz      1f
2187 2187  #if defined(__xpv)
2188 2188          leaq    xpv_panicking, %rsi
2189 2189          movl    (%rsi), %esi
2190 2190          cmpl    $0, %esi
2191 2191          jne     1f
2192 2192          /*
2193 2193           * Since we're -really- running unprivileged, our attempt
2194 2194           * to change the state of the IF bit will be ignored.
2195 2195           * The virtual IF bit is tweaked by CLI and STI.
2196 2196           */
2197 2197          IE_TO_EVENT_MASK(%rsi, %rdi)
2198 2198  #else
2199 2199          sti
2200 2200  #endif
2201 2201  1:
2202 2202          ret
2203 2203          SET_SIZE(restore_int_flag)
2204 2204          SET_SIZE(intr_restore)
2205 2205  
2206 2206  #elif defined(__i386)
2207 2207  
2208 2208          ENTRY(intr_restore)
2209 2209          ENTRY(restore_int_flag)
2210 2210          testl   $PS_IE, 4(%esp)
2211 2211          jz      1f
2212 2212  #if defined(__xpv)
2213 2213          leal    xpv_panicking, %edx
2214 2214          movl    (%edx), %edx
2215 2215          cmpl    $0, %edx
2216 2216          jne     1f
2217 2217          /*
2218 2218           * Since we're -really- running unprivileged, our attempt
2219 2219           * to change the state of the IF bit will be ignored.
2220 2220           * The virtual IF bit is tweaked by CLI and STI.
2221 2221           */
2222 2222          IE_TO_EVENT_MASK(%edx, 4(%esp))
2223 2223  #else
2224 2224          sti
2225 2225  #endif
2226 2226  1:
2227 2227          ret
2228 2228          SET_SIZE(restore_int_flag)
2229 2229          SET_SIZE(intr_restore)
2230 2230  
2231 2231  #endif  /* __i386 */
2232 2232  #endif  /* __lint */
2233 2233  
2234 2234  #if defined(__lint)
2235 2235  
2236 2236  void
2237 2237  sti(void)
2238 2238  {}
2239 2239  
2240 2240  void
2241 2241  cli(void)
2242 2242  {}
2243 2243  
2244 2244  #else   /* __lint */
2245 2245  
2246 2246          ENTRY(sti)
2247 2247          STI
2248 2248          ret
2249 2249          SET_SIZE(sti)
2250 2250  
2251 2251          ENTRY(cli)
2252 2252  #if defined(__amd64)
2253 2253          CLI(%rax)
2254 2254  #elif defined(__i386)
2255 2255          CLI(%eax)
2256 2256  #endif  /* __i386 */
2257 2257          ret
2258 2258          SET_SIZE(cli)
2259 2259  
2260 2260  #endif  /* __lint */
2261 2261  
2262 2262  #if defined(__lint)
2263 2263  
2264 2264  dtrace_icookie_t
2265 2265  dtrace_interrupt_disable(void)
2266 2266  { return (0); }
2267 2267  
2268 2268  #else   /* __lint */
2269 2269  
2270 2270  #if defined(__amd64)
2271 2271  
2272 2272          ENTRY(dtrace_interrupt_disable)
2273 2273          pushfq
2274 2274          popq    %rax
2275 2275  #if defined(__xpv)
2276 2276          leaq    xpv_panicking, %rdi
2277 2277          movl    (%rdi), %edi
2278 2278          cmpl    $0, %edi
2279 2279          jne     .dtrace_interrupt_disable_done
2280 2280          CLIRET(%rdi, %dl)       /* returns event mask in %dl */
2281 2281          /*
2282 2282           * Synthesize the PS_IE bit from the event mask bit
2283 2283           */
2284 2284          andq    $_BITNOT(PS_IE), %rax
2285 2285          testb   $1, %dl
2286 2286          jnz     .dtrace_interrupt_disable_done
2287 2287          orq     $PS_IE, %rax
2288 2288  #else
2289 2289          CLI(%rdx)
2290 2290  #endif
2291 2291  .dtrace_interrupt_disable_done:
2292 2292          ret
2293 2293          SET_SIZE(dtrace_interrupt_disable)
2294 2294  
2295 2295  #elif defined(__i386)
2296 2296                  
2297 2297          ENTRY(dtrace_interrupt_disable)
2298 2298          pushfl
2299 2299          popl    %eax
2300 2300  #if defined(__xpv)
2301 2301          leal    xpv_panicking, %edx
2302 2302          movl    (%edx), %edx
2303 2303          cmpl    $0, %edx
2304 2304          jne     .dtrace_interrupt_disable_done
2305 2305          CLIRET(%edx, %cl)       /* returns event mask in %cl */
2306 2306          /*
2307 2307           * Synthesize the PS_IE bit from the event mask bit
2308 2308           */
2309 2309          andl    $_BITNOT(PS_IE), %eax
2310 2310          testb   $1, %cl
2311 2311          jnz     .dtrace_interrupt_disable_done
2312 2312          orl     $PS_IE, %eax
2313 2313  #else
2314 2314          CLI(%edx)
2315 2315  #endif
2316 2316  .dtrace_interrupt_disable_done:
2317 2317          ret
2318 2318          SET_SIZE(dtrace_interrupt_disable)
2319 2319  
2320 2320  #endif  /* __i386 */    
2321 2321  #endif  /* __lint */
2322 2322  
2323 2323  #if defined(__lint)
2324 2324  
2325 2325  /*ARGSUSED*/
2326 2326  void
2327 2327  dtrace_interrupt_enable(dtrace_icookie_t cookie)
2328 2328  {}
2329 2329  
2330 2330  #else   /* __lint */
2331 2331  
2332 2332  #if defined(__amd64)
2333 2333  
2334 2334          ENTRY(dtrace_interrupt_enable)
2335 2335          pushq   %rdi
2336 2336          popfq
2337 2337  #if defined(__xpv)
2338 2338          leaq    xpv_panicking, %rdx
2339 2339          movl    (%rdx), %edx
2340 2340          cmpl    $0, %edx
2341 2341          jne     .dtrace_interrupt_enable_done
2342 2342          /*
2343 2343           * Since we're -really- running unprivileged, our attempt
2344 2344           * to change the state of the IF bit will be ignored. The
2345 2345           * virtual IF bit is tweaked by CLI and STI.
2346 2346           */
2347 2347          IE_TO_EVENT_MASK(%rdx, %rdi)
2348 2348  #endif
2349 2349  .dtrace_interrupt_enable_done:
2350 2350          ret
2351 2351          SET_SIZE(dtrace_interrupt_enable)
2352 2352  
2353 2353  #elif defined(__i386)
2354 2354                  
2355 2355          ENTRY(dtrace_interrupt_enable)
2356 2356          movl    4(%esp), %eax
2357 2357          pushl   %eax
2358 2358          popfl
2359 2359  #if defined(__xpv)
2360 2360          leal    xpv_panicking, %edx
2361 2361          movl    (%edx), %edx
2362 2362          cmpl    $0, %edx
2363 2363          jne     .dtrace_interrupt_enable_done
2364 2364          /*
2365 2365           * Since we're -really- running unprivileged, our attempt
2366 2366           * to change the state of the IF bit will be ignored. The
2367 2367           * virtual IF bit is tweaked by CLI and STI.
2368 2368           */
2369 2369          IE_TO_EVENT_MASK(%edx, %eax)
2370 2370  #endif
2371 2371  .dtrace_interrupt_enable_done:
2372 2372          ret
2373 2373          SET_SIZE(dtrace_interrupt_enable)
2374 2374  
2375 2375  #endif  /* __i386 */    
2376 2376  #endif  /* __lint */
2377 2377  
2378 2378  
2379 2379  #if defined(__lint)
2380 2380  
2381 2381  void
2382 2382  dtrace_membar_producer(void)
2383 2383  {}
2384 2384  
2385 2385  void
2386 2386  dtrace_membar_consumer(void)
2387 2387  {}
2388 2388  
2389 2389  #else   /* __lint */
2390 2390  
2391 2391          ENTRY(dtrace_membar_producer)
2392 2392          rep;    ret     /* use 2 byte return instruction when branch target */
2393 2393                          /* AMD Software Optimization Guide - Section 6.2 */
2394 2394          SET_SIZE(dtrace_membar_producer)
2395 2395  
2396 2396          ENTRY(dtrace_membar_consumer)
2397 2397          rep;    ret     /* use 2 byte return instruction when branch target */
2398 2398                          /* AMD Software Optimization Guide - Section 6.2 */
2399 2399          SET_SIZE(dtrace_membar_consumer)
2400 2400  
2401 2401  #endif  /* __lint */
2402 2402  
2403 2403  #if defined(__lint)
2404 2404  
2405 2405  kthread_id_t
2406 2406  threadp(void)
2407 2407  { return ((kthread_id_t)0); }
2408 2408  
2409 2409  #else   /* __lint */
2410 2410  
2411 2411  #if defined(__amd64)
2412 2412          
2413 2413          ENTRY(threadp)
2414 2414          movq    %gs:CPU_THREAD, %rax
2415 2415          ret
2416 2416          SET_SIZE(threadp)
2417 2417  
2418 2418  #elif defined(__i386)
2419 2419  
2420 2420          ENTRY(threadp)
2421 2421          movl    %gs:CPU_THREAD, %eax
2422 2422          ret
2423 2423          SET_SIZE(threadp)
2424 2424  
2425 2425  #endif  /* __i386 */
2426 2426  #endif  /* __lint */
2427 2427  
2428 2428  /*
2429 2429   *   Checksum routine for Internet Protocol Headers
2430 2430   */
2431 2431  
2432 2432  #if defined(__lint)
2433 2433  
2434 2434  /* ARGSUSED */
2435 2435  unsigned int
2436 2436  ip_ocsum(
2437 2437          ushort_t *address,      /* ptr to 1st message buffer */
2438 2438          int halfword_count,     /* length of data in 16-bit halfwords */
2439 2439          unsigned int sum)       /* partial checksum */
2440 2440  { 
2441 2441          int             i;
2442 2442          unsigned int    psum = 0;       /* partial sum */
2443 2443  
2444 2444          for (i = 0; i < halfword_count; i++, address++) {
2445 2445                  psum += *address;
2446 2446          }
2447 2447  
2448 2448          while ((psum >> 16) != 0) {
2449 2449                  psum = (psum & 0xffff) + (psum >> 16);
2450 2450          }
2451 2451  
2452 2452          psum += sum;
2453 2453  
2454 2454          while ((psum >> 16) != 0) {
2455 2455                  psum = (psum & 0xffff) + (psum >> 16);
2456 2456          }
2457 2457  
2458 2458          return (psum);
2459 2459  }
2460 2460  
2461 2461  #else   /* __lint */
2462 2462  
2463 2463  #if defined(__amd64)
2464 2464  
2465 2465          ENTRY(ip_ocsum)
2466 2466          pushq   %rbp
2467 2467          movq    %rsp, %rbp
2468 2468  #ifdef DEBUG
2469 2469          movq    postbootkernelbase(%rip), %rax
2470 2470          cmpq    %rax, %rdi
2471 2471          jnb     1f
2472 2472          xorl    %eax, %eax
2473 2473          movq    %rdi, %rsi
2474 2474          leaq    .ip_ocsum_panic_msg(%rip), %rdi
2475 2475          call    panic
2476 2476          /*NOTREACHED*/
2477 2477  .ip_ocsum_panic_msg:
2478 2478          .string "ip_ocsum: address 0x%p below kernelbase\n"
2479 2479  1:
2480 2480  #endif
2481 2481          movl    %esi, %ecx      /* halfword_count */
2482 2482          movq    %rdi, %rsi      /* address */
2483 2483                                  /* partial sum in %edx */
2484 2484          xorl    %eax, %eax
2485 2485          testl   %ecx, %ecx
2486 2486          jz      .ip_ocsum_done
2487 2487          testq   $3, %rsi
2488 2488          jnz     .ip_csum_notaligned
2489 2489  .ip_csum_aligned:       /* XX64 opportunities for 8-byte operations? */
2490 2490  .next_iter:
2491 2491          /* XX64 opportunities for prefetch? */
2492 2492          /* XX64 compute csum with 64 bit quantities? */
2493 2493          subl    $32, %ecx
2494 2494          jl      .less_than_32
2495 2495  
2496 2496          addl    0(%rsi), %edx
2497 2497  .only60:
2498 2498          adcl    4(%rsi), %eax
2499 2499  .only56:
2500 2500          adcl    8(%rsi), %edx
2501 2501  .only52:
2502 2502          adcl    12(%rsi), %eax
2503 2503  .only48:
2504 2504          adcl    16(%rsi), %edx
2505 2505  .only44:
2506 2506          adcl    20(%rsi), %eax
2507 2507  .only40:
2508 2508          adcl    24(%rsi), %edx
2509 2509  .only36:
2510 2510          adcl    28(%rsi), %eax
2511 2511  .only32:
2512 2512          adcl    32(%rsi), %edx
2513 2513  .only28:
2514 2514          adcl    36(%rsi), %eax
2515 2515  .only24:
2516 2516          adcl    40(%rsi), %edx
2517 2517  .only20:
2518 2518          adcl    44(%rsi), %eax
2519 2519  .only16:
2520 2520          adcl    48(%rsi), %edx
2521 2521  .only12:
2522 2522          adcl    52(%rsi), %eax
2523 2523  .only8:
2524 2524          adcl    56(%rsi), %edx
2525 2525  .only4:
2526 2526          adcl    60(%rsi), %eax  /* could be adding -1 and -1 with a carry */
2527 2527  .only0:
2528 2528          adcl    $0, %eax        /* could be adding -1 in eax with a carry */
2529 2529          adcl    $0, %eax
2530 2530  
2531 2531          addq    $64, %rsi
2532 2532          testl   %ecx, %ecx
2533 2533          jnz     .next_iter
2534 2534  
2535 2535  .ip_ocsum_done:
2536 2536          addl    %eax, %edx
2537 2537          adcl    $0, %edx
2538 2538          movl    %edx, %eax      /* form a 16 bit checksum by */
2539 2539          shrl    $16, %eax       /* adding two halves of 32 bit checksum */
2540 2540          addw    %dx, %ax
2541 2541          adcw    $0, %ax
2542 2542          andl    $0xffff, %eax
2543 2543          leave
2544 2544          ret
2545 2545  
2546 2546  .ip_csum_notaligned:
2547 2547          xorl    %edi, %edi
2548 2548          movw    (%rsi), %di
2549 2549          addl    %edi, %edx
2550 2550          adcl    $0, %edx
2551 2551          addq    $2, %rsi
2552 2552          decl    %ecx
2553 2553          jmp     .ip_csum_aligned
2554 2554  
2555 2555  .less_than_32:
2556 2556          addl    $32, %ecx
2557 2557          testl   $1, %ecx
2558 2558          jz      .size_aligned
2559 2559          andl    $0xfe, %ecx
2560 2560          movzwl  (%rsi, %rcx, 2), %edi
2561 2561          addl    %edi, %edx
2562 2562          adcl    $0, %edx
2563 2563  .size_aligned:
2564 2564          movl    %ecx, %edi
2565 2565          shrl    $1, %ecx
2566 2566          shl     $1, %edi
2567 2567          subq    $64, %rdi
2568 2568          addq    %rdi, %rsi
2569 2569          leaq    .ip_ocsum_jmptbl(%rip), %rdi
2570 2570          leaq    (%rdi, %rcx, 8), %rdi
2571 2571          xorl    %ecx, %ecx
2572 2572          clc
2573 2573          jmp     *(%rdi)
2574 2574  
2575 2575          .align  8
2576 2576  .ip_ocsum_jmptbl:
2577 2577          .quad   .only0, .only4, .only8, .only12, .only16, .only20
2578 2578          .quad   .only24, .only28, .only32, .only36, .only40, .only44
2579 2579          .quad   .only48, .only52, .only56, .only60
2580 2580          SET_SIZE(ip_ocsum)
2581 2581  
2582 2582  #elif defined(__i386)
2583 2583  
2584 2584          ENTRY(ip_ocsum)
2585 2585          pushl   %ebp
2586 2586          movl    %esp, %ebp
2587 2587          pushl   %ebx
2588 2588          pushl   %esi
2589 2589          pushl   %edi
2590 2590          movl    12(%ebp), %ecx  /* count of half words */
2591 2591          movl    16(%ebp), %edx  /* partial checksum */
2592 2592          movl    8(%ebp), %esi
2593 2593          xorl    %eax, %eax
2594 2594          testl   %ecx, %ecx
2595 2595          jz      .ip_ocsum_done
2596 2596  
2597 2597          testl   $3, %esi
2598 2598          jnz     .ip_csum_notaligned
2599 2599  .ip_csum_aligned:
2600 2600  .next_iter:
2601 2601          subl    $32, %ecx
2602 2602          jl      .less_than_32
2603 2603  
2604 2604          addl    0(%esi), %edx
2605 2605  .only60:
2606 2606          adcl    4(%esi), %eax
2607 2607  .only56:
2608 2608          adcl    8(%esi), %edx
2609 2609  .only52:
2610 2610          adcl    12(%esi), %eax
2611 2611  .only48:
2612 2612          adcl    16(%esi), %edx
2613 2613  .only44:
2614 2614          adcl    20(%esi), %eax
2615 2615  .only40:
2616 2616          adcl    24(%esi), %edx
2617 2617  .only36:
2618 2618          adcl    28(%esi), %eax
2619 2619  .only32:
2620 2620          adcl    32(%esi), %edx
2621 2621  .only28:
2622 2622          adcl    36(%esi), %eax
2623 2623  .only24:
2624 2624          adcl    40(%esi), %edx
2625 2625  .only20:
2626 2626          adcl    44(%esi), %eax
2627 2627  .only16:
2628 2628          adcl    48(%esi), %edx
2629 2629  .only12:
2630 2630          adcl    52(%esi), %eax
2631 2631  .only8:
2632 2632          adcl    56(%esi), %edx
2633 2633  .only4:
2634 2634          adcl    60(%esi), %eax  /* We could be adding -1 and -1 with a carry */
2635 2635  .only0:
2636 2636          adcl    $0, %eax        /* we could be adding -1 in eax with a carry */
2637 2637          adcl    $0, %eax
2638 2638  
2639 2639          addl    $64, %esi
2640 2640          andl    %ecx, %ecx
2641 2641          jnz     .next_iter
2642 2642  
2643 2643  .ip_ocsum_done:
2644 2644          addl    %eax, %edx
2645 2645          adcl    $0, %edx
2646 2646          movl    %edx, %eax      /* form a 16 bit checksum by */
2647 2647          shrl    $16, %eax       /* adding two halves of 32 bit checksum */
2648 2648          addw    %dx, %ax
2649 2649          adcw    $0, %ax
2650 2650          andl    $0xffff, %eax
2651 2651          popl    %edi            /* restore registers */
2652 2652          popl    %esi
2653 2653          popl    %ebx
2654 2654          leave
2655 2655          ret
2656 2656  
2657 2657  .ip_csum_notaligned:
2658 2658          xorl    %edi, %edi
2659 2659          movw    (%esi), %di
2660 2660          addl    %edi, %edx
2661 2661          adcl    $0, %edx
2662 2662          addl    $2, %esi
2663 2663          decl    %ecx
2664 2664          jmp     .ip_csum_aligned
2665 2665  
2666 2666  .less_than_32:
2667 2667          addl    $32, %ecx
2668 2668          testl   $1, %ecx
2669 2669          jz      .size_aligned
2670 2670          andl    $0xfe, %ecx
2671 2671          movzwl  (%esi, %ecx, 2), %edi
2672 2672          addl    %edi, %edx
2673 2673          adcl    $0, %edx
2674 2674  .size_aligned:
2675 2675          movl    %ecx, %edi
2676 2676          shrl    $1, %ecx
2677 2677          shl     $1, %edi
2678 2678          subl    $64, %edi
2679 2679          addl    %edi, %esi
2680 2680          movl    $.ip_ocsum_jmptbl, %edi
2681 2681          lea     (%edi, %ecx, 4), %edi
2682 2682          xorl    %ecx, %ecx
2683 2683          clc
2684 2684          jmp     *(%edi)
2685 2685          SET_SIZE(ip_ocsum)
2686 2686  
2687 2687          .data
2688 2688          .align  4
2689 2689  
2690 2690  .ip_ocsum_jmptbl:
2691 2691          .long   .only0, .only4, .only8, .only12, .only16, .only20
2692 2692          .long   .only24, .only28, .only32, .only36, .only40, .only44
2693 2693          .long   .only48, .only52, .only56, .only60
2694 2694  
2695 2695          
2696 2696  #endif  /* __i386 */            
2697 2697  #endif  /* __lint */
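
/*
 * Both versions finish by folding the 32-bit accumulator into 16 bits.
 * For example, halfwords 0xffff and 0x0002 sum to 0x10001; adding the
 * carry back in gives 0x0001 + 0x0001 = 0x0002, the one's-complement
 * result.  The fold, as a C sketch:
 */
uint16_t
ocsum_fold_sketch(uint32_t psum)
{
        while ((psum >> 16) != 0)
                psum = (psum & 0xffff) + (psum >> 16);
        return ((uint16_t)psum);
}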
2698 2698  
2699 2699  /*
2700 2700   * Multiply two 32-bit unsigned values and yield a 64-bit (u_longlong_t)
2701 2701   * result, callable from C.  Provided to manipulate hrtime_t values.
2702 2702   */
2703 2703  #if defined(__lint)
2704 2704  
2705 2705  /* result = a * b; */
2706 2706  
2707 2707  /* ARGSUSED */
2708 2708  unsigned long long
2709 2709  mul32(uint_t a, uint_t b)
2710 2710  { return (0); }
2711 2711  
2712 2712  #else   /* __lint */
2713 2713  
2714 2714  #if defined(__amd64)
2715 2715  
2716 2716          ENTRY(mul32)
2717 2717          xorl    %edx, %edx      /* XX64 joe, paranoia? */
2718 2718          movl    %edi, %eax
2719 2719          mull    %esi
2720 2720          shlq    $32, %rdx       
2721 2721          orq     %rdx, %rax
2722 2722          ret
2723 2723          SET_SIZE(mul32)
2724 2724  
2725 2725  #elif defined(__i386)
2726 2726  
2727 2727          ENTRY(mul32)
2728 2728          movl    8(%esp), %eax
2729 2729          movl    4(%esp), %ecx
2730 2730          mull    %ecx
2731 2731          ret
2732 2732          SET_SIZE(mul32)
2733 2733  
2734 2734  #endif  /* __i386 */
2735 2735  #endif  /* __lint */
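
/*
 * The semantics are a plain widening multiply, which the i386 version
 * performs with a single mull instruction.  In C (a sketch):
 */
unsigned long long
mul32_sketch(uint_t a, uint_t b)
{
        return ((unsigned long long)a * b);
}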
2736 2736  
2737 2737  #if defined(notused)
2738 2738  #if defined(__lint)
2739 2739  /* ARGSUSED */
2740 2740  void
2741 2741  load_pte64(uint64_t *pte, uint64_t pte_value)
2742 2742  {}
2743 2743  #else   /* __lint */
2744 2744          .globl load_pte64
2745 2745  load_pte64:
2746 2746          movl    4(%esp), %eax
2747 2747          movl    8(%esp), %ecx
2748 2748          movl    12(%esp), %edx
2749 2749          movl    %edx, 4(%eax)
2750 2750          movl    %ecx, (%eax)
2751 2751          ret
2752 2752  #endif  /* __lint */
2753 2753  #endif  /* notused */
2754 2754  
2755 2755  #if defined(__lint)
2756 2756  
2757 2757  /*ARGSUSED*/
2758 2758  void
2759 2759  scan_memory(caddr_t addr, size_t size)
2760 2760  {}
2761 2761  
2762 2762  #else   /* __lint */
2763 2763  
2764 2764  #if defined(__amd64)
2765 2765  
2766 2766          ENTRY(scan_memory)
2767 2767          shrq    $3, %rsi        /* convert %rsi from byte to quadword count */
2768 2768          jz      .scanm_done
2769 2769          movq    %rsi, %rcx      /* move count into rep control register */
2770 2770          movq    %rdi, %rsi      /* move addr into lodsq control reg. */
2771 2771          rep lodsq               /* scan the memory range */
2772 2772  .scanm_done:
2773 2773          rep;    ret     /* use 2 byte return instruction when branch target */
2774 2774                          /* AMD Software Optimization Guide - Section 6.2 */
2775 2775          SET_SIZE(scan_memory)
2776 2776  
2777 2777  #elif defined(__i386)
2778 2778  
2779 2779          ENTRY(scan_memory)
2780 2780          pushl   %ecx
2781 2781          pushl   %esi
2782 2782          movl    16(%esp), %ecx  /* move 2nd arg into rep control register */
2783 2783          shrl    $2, %ecx        /* convert from byte count to word count */
2784 2784          jz      .scanm_done
2785 2785          movl    12(%esp), %esi  /* move 1st arg into lodsw control register */
2786 2786          .byte   0xf3            /* rep prefix.  lame assembler.  sigh. */
2787 2787          lodsl
2788 2788  .scanm_done:
2789 2789          popl    %esi
2790 2790          popl    %ecx
2791 2791          ret
2792 2792          SET_SIZE(scan_memory)
2793 2793  
2794 2794  #endif  /* __i386 */
2795 2795  #endif  /* __lint */
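
/*
 * scan_memory() reads and discards every word in the range (the amd64
 * version's "rep lodsq" loads each quadword into %rax).  The same
 * access pattern as a C sketch, mirroring the 64-bit version:
 */
void
scan_memory_sketch(caddr_t addr, size_t size)
{
        volatile uint64_t *p = (volatile uint64_t *)(uintptr_t)addr;
        size_t n = size / sizeof (uint64_t);

        while (n-- > 0)
                (void) *p++;    /* touch each quadword, discard value */
}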
2796 2796  
2797 2797  
2798 2798  #if defined(__lint)
2799 2799  
2800 2800  /*ARGSUSED */
2801 2801  int
2802 2802  lowbit(ulong_t i)
2803 2803  { return (0); }
2804 2804  
2805 2805  #else   /* __lint */
2806 2806  
2807 2807  #if defined(__amd64)
2808 2808  
2809 2809          ENTRY(lowbit)
2810 2810          movl    $-1, %eax       /* if no bit is set, -1 + 1 == 0 below */
2811 2811          bsfq    %rdi, %rdi      /* %rdi = index of lowest set bit */
2812 2812          cmovnz  %edi, %eax      /* take that index unless input was 0 */
2813 2813          incl    %eax            /* convert to 1-based bit number */
2814 2814          ret
2815 2815          SET_SIZE(lowbit)
2816 2816  
2817 2817  #elif defined(__i386)
2818 2818  
2819 2819          ENTRY(lowbit)
2820 2820          bsfl    4(%esp), %eax
2821 2821          jz      0f
2822 2822          incl    %eax
2823 2823          ret
2824 2824  0:
2825 2825          xorl    %eax, %eax
2826 2826          ret
2827 2827          SET_SIZE(lowbit)
2828 2828  
2829 2829  #endif  /* __i386 */
2830 2830  #endif  /* __lint */
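
/*
 * lowbit() follows the ffs() convention: bit positions are 1-based,
 * and 0 means no bits are set.  The contract as a C sketch:
 */
int
lowbit_sketch(ulong_t i)
{
        int b;

        if (i == 0)
                return (0);
        for (b = 1; (i & 1) == 0; b++)
                i >>= 1;
        return (b);
}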
2831 2831  
2832 2832  #if defined(__lint)
2833 2833  
2834 2834  /*ARGSUSED*/
2835 2835  int
2836 2836  highbit(ulong_t i)
2837 2837  { return (0); }
2838 2838  
2839 2839  /*ARGSUSED*/
2840 2840  int
2841 2841  highbit64(uint64_t i)
2842 2842  { return (0); }
2843 2843  
2844 2844  #else   /* __lint */
2845 2845  
2846 2846  #if defined(__amd64)
2847 2847  
2848 2848          ENTRY(highbit)
2849 2849          ALTENTRY(highbit64)
2850 2850          movl    $-1, %eax       /* if no bit is set, -1 + 1 == 0 below */
2851 2851          bsrq    %rdi, %rdi      /* %rdi = index of highest set bit */
2852 2852          cmovnz  %edi, %eax      /* take that index unless input was 0 */
2853 2853          incl    %eax            /* convert to 1-based bit number */
2854 2854          ret
2855 2855          SET_SIZE(highbit64)
2856 2856          SET_SIZE(highbit)
2857 2857  
2858 2858  #elif defined(__i386)
2859 2859  
2860 2860          ENTRY(highbit)
2861 2861          bsrl    4(%esp), %eax
2862 2862          jz      0f
2863 2863          incl    %eax
2864 2864          ret
2865 2865  0:
2866 2866          xorl    %eax, %eax
2867 2867          ret    
2868 2868          SET_SIZE(highbit)
2869 2869  
2870 2870          ENTRY(highbit64)
2871 2871          bsrl    8(%esp), %eax   /* scan the upper 32 bits first */
2872 2872          jz      highbit         /* upper word is 0: scan the low word */
2873 2873          addl    $33, %eax       /* 32 for the upper word, +1 for 1-based */
2874 2874          ret
2875 2875          SET_SIZE(highbit64)
2876 2876  
2877 2877  #endif  /* __i386 */
2878 2878  #endif  /* __lint */
2879 2879  
2880 2880  #if defined(__lint)
2881 2881  
2882 2882  /*ARGSUSED*/
2883 2883  uint64_t
2884 2884  rdmsr(uint_t r)
2885 2885  { return (0); }
2886 2886  
2887 2887  /*ARGSUSED*/
2888 2888  void
2889 2889  wrmsr(uint_t r, const uint64_t val)
2890 2890  {}
2891 2891  
2892 2892  /*ARGSUSED*/
2893 2893  uint64_t
2894 2894  xrdmsr(uint_t r)
2895 2895  { return (0); }
2896 2896  
2897 2897  /*ARGSUSED*/
2898 2898  void
2899 2899  xwrmsr(uint_t r, const uint64_t val)
2900 2900  {}
2901 2901  
2902 2902  void
2903 2903  invalidate_cache(void)
2904 2904  {}
2905 2905  
2906 2906  /*ARGSUSED*/
2907 2907  uint64_t
2908 2908  get_xcr(uint_t r)
2909 2909  { return (0); }
2910 2910  
2911 2911  /*ARGSUSED*/
2912 2912  void
2913 2913  set_xcr(uint_t r, const uint64_t val)
2914 2914  {}
2915 2915  
2916 2916  #else  /* __lint */
2917 2917  
2918 2918  #define XMSR_ACCESS_VAL         $0x9c5a203a
2919 2919  
2920 2920  #if defined(__amd64)
2921 2921          
2922 2922          ENTRY(rdmsr)
2923 2923          movl    %edi, %ecx
2924 2924          rdmsr
2925 2925          shlq    $32, %rdx
2926 2926          orq     %rdx, %rax
2927 2927          ret
2928 2928          SET_SIZE(rdmsr)
2929 2929  
2930 2930          ENTRY(wrmsr)
2931 2931          movq    %rsi, %rdx
2932 2932          shrq    $32, %rdx
2933 2933          movl    %esi, %eax
2934 2934          movl    %edi, %ecx
2935 2935          wrmsr
2936 2936          ret
2937 2937          SET_SIZE(wrmsr)
2938 2938  
2939 2939          ENTRY(xrdmsr)
2940 2940          pushq   %rbp
2941 2941          movq    %rsp, %rbp
2942 2942          movl    %edi, %ecx
2943 2943          movl    XMSR_ACCESS_VAL, %edi   /* this value is needed to access MSR */
2944 2944          rdmsr
2945 2945          shlq    $32, %rdx
2946 2946          orq     %rdx, %rax
2947 2947          leave
2948 2948          ret
2949 2949          SET_SIZE(xrdmsr)
2950 2950  
2951 2951          ENTRY(xwrmsr)
2952 2952          pushq   %rbp
2953 2953          movq    %rsp, %rbp
2954 2954          movl    %edi, %ecx
2955 2955          movl    XMSR_ACCESS_VAL, %edi   /* this value is needed to access MSR */
2956 2956          movq    %rsi, %rdx
2957 2957          shrq    $32, %rdx
2958 2958          movl    %esi, %eax
2959 2959          wrmsr
2960 2960          leave
2961 2961          ret
2962 2962          SET_SIZE(xwrmsr)
2963 2963  
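        /*
         * xgetbv/xsetbv are hand-encoded below as .byte sequences,
         * presumably for assemblers that predate the mnemonics.  Both
         * take the XCR number in %ecx and move the 64-bit value in
         * %edx:%eax.
         */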
2964 2964          ENTRY(get_xcr)
2965 2965          movl    %edi, %ecx
2966 2966          #xgetbv
2967 2967          .byte   0x0f,0x01,0xd0
2968 2968          shlq    $32, %rdx
2969 2969          orq     %rdx, %rax
2970 2970          ret
2971 2971          SET_SIZE(get_xcr)
2972 2972  
2973 2973          ENTRY(set_xcr)
2974 2974          movq    %rsi, %rdx
2975 2975          shrq    $32, %rdx
2976 2976          movl    %esi, %eax
2977 2977          movl    %edi, %ecx
2978 2978          #xsetbv
2979 2979          .byte   0x0f,0x01,0xd1
2980 2980          ret
2981 2981          SET_SIZE(set_xcr)
2982 2982  
2983 2983  #elif defined(__i386)
2984 2984  
2985 2985          ENTRY(rdmsr)
2986 2986          movl    4(%esp), %ecx
2987 2987          rdmsr
2988 2988          ret
2989 2989          SET_SIZE(rdmsr)
2990 2990  
2991 2991          ENTRY(wrmsr)
2992 2992          movl    4(%esp), %ecx
2993 2993          movl    8(%esp), %eax
2994 2994          movl    12(%esp), %edx 
2995 2995          wrmsr
2996 2996          ret
2997 2997          SET_SIZE(wrmsr)
2998 2998  
2999 2999          ENTRY(xrdmsr)
3000 3000          pushl   %ebp
3001 3001          movl    %esp, %ebp
3002 3002          movl    8(%esp), %ecx
3003 3003          pushl   %edi
3004 3004          movl    XMSR_ACCESS_VAL, %edi   /* this value is needed to access MSR */
3005 3005          rdmsr
3006 3006          popl    %edi
3007 3007          leave
3008 3008          ret
3009 3009          SET_SIZE(xrdmsr)
3010 3010  
3011 3011          ENTRY(xwrmsr)
3012 3012          pushl   %ebp
3013 3013          movl    %esp, %ebp
3014 3014          movl    8(%esp), %ecx
3015 3015          movl    12(%esp), %eax
3016 3016          movl    16(%esp), %edx 
3017 3017          pushl   %edi
3018 3018          movl    XMSR_ACCESS_VAL, %edi   /* this value is needed to access MSR */
3019 3019          wrmsr
3020 3020          popl    %edi
3021 3021          leave
3022 3022          ret
3023 3023          SET_SIZE(xwrmsr)
3024 3024  
3025 3025          ENTRY(get_xcr)
3026 3026          movl    4(%esp), %ecx
3027 3027          #xgetbv
3028 3028          .byte   0x0f,0x01,0xd0
3029 3029          ret
3030 3030          SET_SIZE(get_xcr)
3031 3031  
3032 3032          ENTRY(set_xcr)
3033 3033          movl    4(%esp), %ecx
3034 3034          movl    8(%esp), %eax
3035 3035          movl    12(%esp), %edx
3036 3036          #xsetbv
3037 3037          .byte   0x0f,0x01,0xd1
3038 3038          ret
3039 3039          SET_SIZE(set_xcr)
3040 3040  
3041 3041  #endif  /* __i386 */
3042 3042  
3043 3043          ENTRY(invalidate_cache)
3044 3044          wbinvd
3045 3045          ret
3046 3046          SET_SIZE(invalidate_cache)
3047 3047  
3048 3048  #endif  /* __lint */
3049 3049  
3050 3050  #if defined(__lint)
3051 3051  
3052 3052  /*ARGSUSED*/
3053 3053  void
3054 3054  getcregs(struct cregs *crp)
3055 3055  {}
3056 3056  
3057 3057  #else   /* __lint */
3058 3058  
3059 3059  #if defined(__amd64)
3060 3060  
3061 3061          ENTRY_NP(getcregs)
3062 3062  #if defined(__xpv)
3063 3063          /*
3064 3064           * Only a few of the hardware control registers or descriptor tables
3065 3065           * are directly accessible to us, so just zero the structure.
3066 3066           *
3067 3067           * XXPV Perhaps it would be helpful for the hypervisor to return
3068 3068           *      virtualized versions of these for post-mortem use.
3069 3069           *      (Need to reevaluate - perhaps it already does!)
3070 3070           */
3071 3071          pushq   %rdi            /* save *crp */
3072 3072          movq    $CREGSZ, %rsi
3073 3073          call    bzero
3074 3074          popq    %rdi
3075 3075  
3076 3076          /*
3077 3077           * Dump what limited information we can
3078 3078           */
3079 3079          movq    %cr0, %rax
3080 3080          movq    %rax, CREG_CR0(%rdi)    /* cr0 */
3081 3081          movq    %cr2, %rax
3082 3082          movq    %rax, CREG_CR2(%rdi)    /* cr2 */
3083 3083          movq    %cr3, %rax
3084 3084          movq    %rax, CREG_CR3(%rdi)    /* cr3 */
3085 3085          movq    %cr4, %rax
3086 3086          movq    %rax, CREG_CR4(%rdi)    /* cr4 */
3087 3087  
3088 3088  #else   /* __xpv */
3089 3089  
3090 3090  #define GETMSR(r, off, d)       \
3091 3091          movl    $r, %ecx;       \
3092 3092          rdmsr;                  \
3093 3093          movl    %eax, off(d);   \
3094 3094          movl    %edx, off+4(d)
3095 3095  
3096 3096          xorl    %eax, %eax
3097 3097          movq    %rax, CREG_GDT+8(%rdi)
3098 3098          sgdt    CREG_GDT(%rdi)          /* 10 bytes */
3099 3099          movq    %rax, CREG_IDT+8(%rdi)
3100 3100          sidt    CREG_IDT(%rdi)          /* 10 bytes */
3101 3101          movq    %rax, CREG_LDT(%rdi)
3102 3102          sldt    CREG_LDT(%rdi)          /* 2 bytes */
3103 3103          movq    %rax, CREG_TASKR(%rdi)
3104 3104          str     CREG_TASKR(%rdi)        /* 2 bytes */
3105 3105          movq    %cr0, %rax
3106 3106          movq    %rax, CREG_CR0(%rdi)    /* cr0 */
3107 3107          movq    %cr2, %rax
3108 3108          movq    %rax, CREG_CR2(%rdi)    /* cr2 */
3109 3109          movq    %cr3, %rax
3110 3110          movq    %rax, CREG_CR3(%rdi)    /* cr3 */
3111 3111          movq    %cr4, %rax
3112 3112          movq    %rax, CREG_CR4(%rdi)    /* cr4 */
3113 3113          movq    %cr8, %rax
3114 3114          movq    %rax, CREG_CR8(%rdi)    /* cr8 */
3115 3115          GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
3116 3116          GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
3117 3117  #endif  /* __xpv */
3118 3118          ret
3119 3119          SET_SIZE(getcregs)
3120 3120  
3121 3121  #undef GETMSR
3122 3122  
3123 3123  #elif defined(__i386)
3124 3124  
3125 3125          ENTRY_NP(getcregs)
3126 3126  #if defined(__xpv)
3127 3127          /*
3128 3128           * Only a few of the hardware control registers or descriptor tables
3129 3129           * are directly accessible to us, so just zero the structure.
3130 3130           *
3131 3131           * XXPV Perhaps it would be helpful for the hypervisor to return
3132 3132           *      virtualized versions of these for post-mortem use.
3133 3133           *      (Need to reevaluate - perhaps it already does!)
3134 3134           */
3135 3135          movl    4(%esp), %edx
3136 3136          pushl   $CREGSZ
3137 3137          pushl   %edx
3138 3138          call    bzero
3139 3139          addl    $8, %esp
3140 3140          movl    4(%esp), %edx
3141 3141  
3142 3142          /*
3143 3143           * Dump what limited information we can
3144 3144           */
3145 3145          movl    %cr0, %eax
3146 3146          movl    %eax, CREG_CR0(%edx)    /* cr0 */
3147 3147          movl    %cr2, %eax
3148 3148          movl    %eax, CREG_CR2(%edx)    /* cr2 */
3149 3149          movl    %cr3, %eax
3150 3150          movl    %eax, CREG_CR3(%edx)    /* cr3 */
3151 3151          movl    %cr4, %eax
3152 3152          movl    %eax, CREG_CR4(%edx)    /* cr4 */
3153 3153  
3154 3154  #else   /* __xpv */
3155 3155  
3156 3156          movl    4(%esp), %edx
3157 3157          movw    $0, CREG_GDT+6(%edx)
3158 3158          movw    $0, CREG_IDT+6(%edx)
3159 3159          sgdt    CREG_GDT(%edx)          /* gdt */
3160 3160          sidt    CREG_IDT(%edx)          /* idt */
3161 3161          sldt    CREG_LDT(%edx)          /* ldt */
3162 3162          str     CREG_TASKR(%edx)        /* task */
3163 3163          movl    %cr0, %eax
3164 3164          movl    %eax, CREG_CR0(%edx)    /* cr0 */
3165 3165          movl    %cr2, %eax
3166 3166          movl    %eax, CREG_CR2(%edx)    /* cr2 */
3167 3167          movl    %cr3, %eax
3168 3168          movl    %eax, CREG_CR3(%edx)    /* cr3 */
3169 3169          bt      $X86FSET_LARGEPAGE, x86_featureset
3170 3170          jnc     .nocr4
3171 3171          movl    %cr4, %eax
3172 3172          movl    %eax, CREG_CR4(%edx)    /* cr4 */
3173 3173          jmp     .skip
3174 3174  .nocr4:
3175 3175          movl    $0, CREG_CR4(%edx)
3176 3176  .skip:
3177 3177  #endif
3178 3178          ret
3179 3179          SET_SIZE(getcregs)
3180 3180  
3181 3181  #endif  /* __i386 */
3182 3182  #endif  /* __lint */
3183 3183  
3184 3184  
3185 3185  /*
3186 3186   * A panic trigger is a word which is updated atomically and can only be set
3187 3187   * once.  We atomically store 0xDEFACEDD and load the old value.  If the
3188 3188   * previous value was 0, we succeed and return 1; otherwise return 0.
3189 3189   * This allows a partially corrupt trigger to still trigger correctly.  DTrace
3190 3190   * has its own version of this function to allow it to panic correctly from
3191 3191   * probe context.
3192 3192   */
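
/*
 * In C terms (a sketch; atomic_swap_32() is the <sys/atomic.h>
 * primitive):
 */
int
panic_trigger_sketch(int *tp)
{
        return (atomic_swap_32((uint32_t *)tp, 0xdefacedd) == 0);
}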
3193 3193  #if defined(__lint)
3194 3194  
3195 3195  /*ARGSUSED*/
3196 3196  int
3197 3197  panic_trigger(int *tp)
3198 3198  { return (0); }
3199 3199  
3200 3200  /*ARGSUSED*/
3201 3201  int
3202 3202  dtrace_panic_trigger(int *tp)
3203 3203  { return (0); }
3204 3204  
3205 3205  #else   /* __lint */
3206 3206  
3207 3207  #if defined(__amd64)
3208 3208  
3209 3209          ENTRY_NP(panic_trigger)
3210 3210          xorl    %eax, %eax
3211 3211          movl    $0xdefacedd, %edx
3212 3212          lock
3213 3213            xchgl %edx, (%rdi)
3214 3214          cmpl    $0, %edx
3215 3215          je      0f 
3216 3216          movl    $0, %eax
3217 3217          ret
3218 3218  0:      movl    $1, %eax
3219 3219          ret
3220 3220          SET_SIZE(panic_trigger)
3221 3221          
3222 3222          ENTRY_NP(dtrace_panic_trigger)
3223 3223          xorl    %eax, %eax
3224 3224          movl    $0xdefacedd, %edx
3225 3225          lock
3226 3226            xchgl %edx, (%rdi)
3227 3227          cmpl    $0, %edx
3228 3228          je      0f
3229 3229          movl    $0, %eax
3230 3230          ret
3231 3231  0:      movl    $1, %eax
3232 3232          ret
3233 3233          SET_SIZE(dtrace_panic_trigger)
3234 3234  
3235 3235  #elif defined(__i386)
3236 3236  
3237 3237          ENTRY_NP(panic_trigger)
3238 3238          movl    4(%esp), %edx           / %edx = address of trigger
3239 3239          movl    $0xdefacedd, %eax       / %eax = 0xdefacedd
3240 3240          lock                            / assert lock
3241 3241          xchgl %eax, (%edx)              / exchange %eax and the trigger
3242 3242          cmpl    $0, %eax                / if (%eax == 0x0)
3243 3243          je      0f                      /   return (1);
3244 3244          movl    $0, %eax                / else
3245 3245          ret                             /   return (0);
3246 3246  0:      movl    $1, %eax
3247 3247          ret
3248 3248          SET_SIZE(panic_trigger)
3249 3249  
3250 3250          ENTRY_NP(dtrace_panic_trigger)
3251 3251          movl    4(%esp), %edx           / %edx = address of trigger
3252 3252          movl    $0xdefacedd, %eax       / %eax = 0xdefacedd
3253 3253          lock                            / assert lock
3254 3254          xchgl %eax, (%edx)              / exchange %eax and the trigger
3255 3255          cmpl    $0, %eax                / if (%eax == 0x0)
3256 3256          je      0f                      /   return (1);
3257 3257          movl    $0, %eax                / else
3258 3258          ret                             /   return (0);
3259 3259  0:      movl    $1, %eax
3260 3260          ret
3261 3261          SET_SIZE(dtrace_panic_trigger)
3262 3262  
3263 3263  #endif  /* __i386 */
3264 3264  #endif  /* __lint */
3265 3265  
3266 3266  /*
3267 3267   * The panic() and cmn_err() functions invoke vpanic() as a common entry point
3268 3268   * into the panic code implemented in panicsys().  vpanic() is responsible
3269 3269   * for passing through the format string and arguments, and constructing a
3270 3270   * regs structure on the stack into which it saves the current register
3271 3271   * values.  If we are not dying due to a fatal trap, these registers will
3272 3272   * then be preserved in panicbuf as the current processor state.  Before
3273 3273   * invoking panicsys(), vpanic() activates the first panic trigger (see
3274 3274   * common/os/panic.c) and switches to the panic_stack if successful.  Note that
3275 3275   * DTrace takes a slightly different panic path if it must panic from probe
3276 3276   * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
3277 3277   * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
3278 3278   * branches back into vpanic().
3279 3279   */
3280 3280  #if defined(__lint)
3281 3281  
3282 3282  /*ARGSUSED*/
3283 3283  void
3284 3284  vpanic(const char *format, va_list alist)
3285 3285  {}
3286 3286  
3287 3287  /*ARGSUSED*/
3288 3288  void
3289 3289  dtrace_vpanic(const char *format, va_list alist)
3290 3290  {}
3291 3291  
3292 3292  #else   /* __lint */
3293 3293  
3294 3294  #if defined(__amd64)
3295 3295  
3296 3296          ENTRY_NP(vpanic)                        /* Initial stack layout: */
3297 3297          
3298 3298          pushq   %rbp                            /* | %rip |     0x60    */
3299 3299          movq    %rsp, %rbp                      /* | %rbp |     0x58    */
3300 3300          pushfq                                  /* | rfl  |     0x50    */
3301 3301          pushq   %r11                            /* | %r11 |     0x48    */
3302 3302          pushq   %r10                            /* | %r10 |     0x40    */
3303 3303          pushq   %rbx                            /* | %rbx |     0x38    */
3304 3304          pushq   %rax                            /* | %rax |     0x30    */
3305 3305          pushq   %r9                             /* | %r9  |     0x28    */
3306 3306          pushq   %r8                             /* | %r8  |     0x20    */
3307 3307          pushq   %rcx                            /* | %rcx |     0x18    */
3308 3308          pushq   %rdx                            /* | %rdx |     0x10    */
3309 3309          pushq   %rsi                            /* | %rsi |     0x8 alist */
3310 3310          pushq   %rdi                            /* | %rdi |     0x0 format */
3311 3311  
3312 3312          movq    %rsp, %rbx                      /* %rbx = current %rsp */
3313 3313  
3314 3314          leaq    panic_quiesce(%rip), %rdi       /* %rdi = &panic_quiesce */
3315 3315          call    panic_trigger                   /* %eax = panic_trigger() */
3316 3316  
3317 3317  vpanic_common:
3318 3318          /*
3319 3319           * The panic_trigger result is in %eax from the call above, and
3320 3320           * dtrace_vpanic places it in %eax before branching here.
3321 3321           * The instructions that follow below will clobber %eax, so
3322 3322           * we stash the panic_trigger result in %r11d.
3323 3323           */
3324 3324          movl    %eax, %r11d
3325 3325          cmpl    $0, %r11d
3326 3326          je      0f
3327 3327  
3328 3328          /*
3329 3329           * If panic_trigger() was successful, we are the first to initiate a
3330 3330           * panic: we now switch to the reserved panic_stack before continuing.
3331 3331           */
3332 3332          leaq    panic_stack(%rip), %rsp
3333 3333          addq    $PANICSTKSIZE, %rsp
3334 3334  0:      subq    $REGSIZE, %rsp
3335 3335          /*
3336 3336           * Now that we've got everything set up, store the register values as
3337 3337           * they were when we entered vpanic() to the designated location in
3338 3338           * the regs structure we allocated on the stack.
3339 3339           */
3340 3340          movq    0x0(%rbx), %rcx
3341 3341          movq    %rcx, REGOFF_RDI(%rsp)
3342 3342          movq    0x8(%rbx), %rcx
3343 3343          movq    %rcx, REGOFF_RSI(%rsp)
3344 3344          movq    0x10(%rbx), %rcx
3345 3345          movq    %rcx, REGOFF_RDX(%rsp)
3346 3346          movq    0x18(%rbx), %rcx
3347 3347          movq    %rcx, REGOFF_RCX(%rsp)
3348 3348          movq    0x20(%rbx), %rcx
3349 3349  
3350 3350          movq    %rcx, REGOFF_R8(%rsp)
3351 3351          movq    0x28(%rbx), %rcx
3352 3352          movq    %rcx, REGOFF_R9(%rsp)
3353 3353          movq    0x30(%rbx), %rcx
3354 3354          movq    %rcx, REGOFF_RAX(%rsp)
3355 3355          movq    0x38(%rbx), %rcx
3356 3356          movq    %rcx, REGOFF_RBX(%rsp)
3357 3357          movq    0x58(%rbx), %rcx
3358 3358  
3359 3359          movq    %rcx, REGOFF_RBP(%rsp)
3360 3360          movq    0x40(%rbx), %rcx
3361 3361          movq    %rcx, REGOFF_R10(%rsp)
3362 3362          movq    0x48(%rbx), %rcx
3363 3363          movq    %rcx, REGOFF_R11(%rsp)
3364 3364          movq    %r12, REGOFF_R12(%rsp)
3365 3365  
3366 3366          movq    %r13, REGOFF_R13(%rsp)
3367 3367          movq    %r14, REGOFF_R14(%rsp)
3368 3368          movq    %r15, REGOFF_R15(%rsp)
3369 3369  
3370 3370          xorl    %ecx, %ecx
3371 3371          movw    %ds, %cx
3372 3372          movq    %rcx, REGOFF_DS(%rsp)
3373 3373          movw    %es, %cx
3374 3374          movq    %rcx, REGOFF_ES(%rsp)
3375 3375          movw    %fs, %cx
3376 3376          movq    %rcx, REGOFF_FS(%rsp)
3377 3377          movw    %gs, %cx
3378 3378          movq    %rcx, REGOFF_GS(%rsp)
3379 3379  
3380 3380          movq    $0, REGOFF_TRAPNO(%rsp)
3381 3381  
3382 3382          movq    $0, REGOFF_ERR(%rsp)
3383 3383          leaq    vpanic(%rip), %rcx
3384 3384          movq    %rcx, REGOFF_RIP(%rsp)
3385 3385          movw    %cs, %cx
3386 3386          movzwq  %cx, %rcx
3387 3387          movq    %rcx, REGOFF_CS(%rsp)
3388 3388          movq    0x50(%rbx), %rcx
3389 3389          movq    %rcx, REGOFF_RFL(%rsp)
3390 3390          movq    %rbx, %rcx
3391 3391          addq    $0x60, %rcx
3392 3392          movq    %rcx, REGOFF_RSP(%rsp)
3393 3393          movw    %ss, %cx
3394 3394          movzwq  %cx, %rcx
3395 3395          movq    %rcx, REGOFF_SS(%rsp)
3396 3396  
3397 3397          /*
3398 3398           * panicsys(format, alist, rp, on_panic_stack) 
3399 3399           */     
3400 3400          movq    REGOFF_RDI(%rsp), %rdi          /* format */
3401 3401          movq    REGOFF_RSI(%rsp), %rsi          /* alist */
3402 3402          movq    %rsp, %rdx                      /* struct regs */
3403 3403          movl    %r11d, %ecx                     /* on_panic_stack */
3404 3404          call    panicsys
3405 3405          addq    $REGSIZE, %rsp
3406 3406          popq    %rdi
3407 3407          popq    %rsi
3408 3408          popq    %rdx
3409 3409          popq    %rcx
3410 3410          popq    %r8
3411 3411          popq    %r9
3412 3412          popq    %rax
3413 3413          popq    %rbx
3414 3414          popq    %r10
3415 3415          popq    %r11
3416 3416          popfq
3417 3417          leave
3418 3418          ret
3419 3419          SET_SIZE(vpanic)
3420 3420  
3421 3421          ENTRY_NP(dtrace_vpanic)                 /* Initial stack layout: */
3422 3422  
3423 3423          pushq   %rbp                            /* | %rip |     0x60    */
3424 3424          movq    %rsp, %rbp                      /* | %rbp |     0x58    */
3425 3425          pushfq                                  /* | rfl  |     0x50    */
3426 3426          pushq   %r11                            /* | %r11 |     0x48    */
3427 3427          pushq   %r10                            /* | %r10 |     0x40    */
3428 3428          pushq   %rbx                            /* | %rbx |     0x38    */
3429 3429          pushq   %rax                            /* | %rax |     0x30    */
3430 3430          pushq   %r9                             /* | %r9  |     0x28    */
3431 3431          pushq   %r8                             /* | %r8  |     0x20    */
3432 3432          pushq   %rcx                            /* | %rcx |     0x18    */
3433 3433          pushq   %rdx                            /* | %rdx |     0x10    */
3434 3434          pushq   %rsi                            /* | %rsi |     0x8 alist */
3435 3435          pushq   %rdi                            /* | %rdi |     0x0 format */
3436 3436  
3437 3437          movq    %rsp, %rbx                      /* %rbx = current %rsp */
3438 3438  
3439 3439          leaq    panic_quiesce(%rip), %rdi       /* %rdi = &panic_quiesce */
3440 3440          call    dtrace_panic_trigger    /* %eax = dtrace_panic_trigger() */
3441 3441          jmp     vpanic_common
3442 3442  
3443 3443          SET_SIZE(dtrace_vpanic)
3444 3444  
3445 3445  #elif defined(__i386)
3446 3446  
3447 3447          ENTRY_NP(vpanic)                        / Initial stack layout:
3448 3448  
3449 3449          pushl   %ebp                            / | %eip | 20
3450 3450          movl    %esp, %ebp                      / | %ebp | 16
3451 3451          pushl   %eax                            / | %eax | 12
3452 3452          pushl   %ebx                            / | %ebx |  8
3453 3453          pushl   %ecx                            / | %ecx |  4
3454 3454          pushl   %edx                            / | %edx |  0
3455 3455  
3456 3456          movl    %esp, %ebx                      / %ebx = current stack pointer
3457 3457  
3458 3458          lea     panic_quiesce, %eax             / %eax = &panic_quiesce
3459 3459          pushl   %eax                            / push &panic_quiesce
3460 3460          call    panic_trigger                   / %eax = panic_trigger()
3461 3461          addl    $4, %esp                        / reset stack pointer
3462 3462  
3463 3463  vpanic_common:
3464 3464          cmpl    $0, %eax                        / if (%eax == 0)
3465 3465          je      0f                              /   goto 0f;
3466 3466  
3467 3467          /*
3468 3468           * If panic_trigger() was successful, we are the first to initiate a
3469 3469           * panic: we now switch to the reserved panic_stack before continuing.
3470 3470           */
3471 3471          lea     panic_stack, %esp               / %esp  = panic_stack
3472 3472          addl    $PANICSTKSIZE, %esp             / %esp += PANICSTKSIZE
3473 3473  
3474 3474  0:      subl    $REGSIZE, %esp                  / allocate struct regs
3475 3475  
3476 3476          /*
3477 3477           * Now that we've got everything set up, store the register values as
3478 3478           * they were when we entered vpanic() to the designated location in
3479 3479           * the regs structure we allocated on the stack. 
3480 3480           */
3481 3481  #if !defined(__GNUC_AS__)
3482 3482          movw    %gs, %edx
3483 3483          movl    %edx, REGOFF_GS(%esp)
3484 3484          movw    %fs, %edx
3485 3485          movl    %edx, REGOFF_FS(%esp)
3486 3486          movw    %es, %edx
3487 3487          movl    %edx, REGOFF_ES(%esp)
3488 3488          movw    %ds, %edx
3489 3489          movl    %edx, REGOFF_DS(%esp)
3490 3490  #else   /* __GNUC_AS__ */
3491 3491          mov     %gs, %edx
3492 3492          mov     %edx, REGOFF_GS(%esp)
3493 3493          mov     %fs, %edx
3494 3494          mov     %edx, REGOFF_FS(%esp)
3495 3495          mov     %es, %edx
3496 3496          mov     %edx, REGOFF_ES(%esp)
3497 3497          mov     %ds, %edx
3498 3498          mov     %edx, REGOFF_DS(%esp)
3499 3499  #endif  /* __GNUC_AS__ */
3500 3500          movl    %edi, REGOFF_EDI(%esp)
3501 3501          movl    %esi, REGOFF_ESI(%esp)
3502 3502          movl    16(%ebx), %ecx
3503 3503          movl    %ecx, REGOFF_EBP(%esp)
3504 3504          movl    %ebx, %ecx
3505 3505          addl    $20, %ecx
3506 3506          movl    %ecx, REGOFF_ESP(%esp)
3507 3507          movl    8(%ebx), %ecx
3508 3508          movl    %ecx, REGOFF_EBX(%esp)
3509 3509          movl    0(%ebx), %ecx
3510 3510          movl    %ecx, REGOFF_EDX(%esp)
3511 3511          movl    4(%ebx), %ecx
3512 3512          movl    %ecx, REGOFF_ECX(%esp)
3513 3513          movl    12(%ebx), %ecx
3514 3514          movl    %ecx, REGOFF_EAX(%esp)
3515 3515          movl    $0, REGOFF_TRAPNO(%esp)
3516 3516          movl    $0, REGOFF_ERR(%esp)
3517 3517          lea     vpanic, %ecx
3518 3518          movl    %ecx, REGOFF_EIP(%esp)
3519 3519  #if !defined(__GNUC_AS__)
3520 3520          movw    %cs, %edx
3521 3521  #else   /* __GNUC_AS__ */
3522 3522          mov     %cs, %edx
3523 3523  #endif  /* __GNUC_AS__ */
3524 3524          movl    %edx, REGOFF_CS(%esp)
3525 3525          pushfl
3526 3526          popl    %ecx
3527 3527  #if defined(__xpv)
3528 3528          /*
3529 3529           * Synthesize the PS_IE bit from the event mask bit
3530 3530           */
3531 3531          CURTHREAD(%edx)
3532 3532          KPREEMPT_DISABLE(%edx)
3533 3533          EVENT_MASK_TO_IE(%edx, %ecx)
3534 3534          CURTHREAD(%edx)
3535 3535          KPREEMPT_ENABLE_NOKP(%edx)
3536 3536  #endif
3537 3537          movl    %ecx, REGOFF_EFL(%esp)
3538 3538          movl    $0, REGOFF_UESP(%esp)
3539 3539  #if !defined(__GNUC_AS__)
3540 3540          movw    %ss, %edx
3541 3541  #else   /* __GNUC_AS__ */
3542 3542          mov     %ss, %edx
3543 3543  #endif  /* __GNUC_AS__ */
3544 3544          movl    %edx, REGOFF_SS(%esp)
3545 3545  
3546 3546          movl    %esp, %ecx                      / %ecx = &regs
3547 3547          pushl   %eax                            / push on_panic_stack
3548 3548          pushl   %ecx                            / push &regs
3549 3549          movl    12(%ebp), %ecx                  / %ecx = alist
3550 3550          pushl   %ecx                            / push alist
3551 3551          movl    8(%ebp), %ecx                   / %ecx = format
3552 3552          pushl   %ecx                            / push format
3553 3553          call    panicsys                        / panicsys();
3554 3554          addl    $16, %esp                       / pop arguments
3555 3555  
3556 3556          addl    $REGSIZE, %esp
3557 3557          popl    %edx
3558 3558          popl    %ecx
3559 3559          popl    %ebx
3560 3560          popl    %eax
3561 3561          leave
3562 3562          ret
3563 3563          SET_SIZE(vpanic)
3564 3564  
3565 3565          ENTRY_NP(dtrace_vpanic)                 / Initial stack layout:
3566 3566  
3567 3567          pushl   %ebp                            / | %eip | 20
3568 3568          movl    %esp, %ebp                      / | %ebp | 16
3569 3569          pushl   %eax                            / | %eax | 12
3570 3570          pushl   %ebx                            / | %ebx |  8
3571 3571          pushl   %ecx                            / | %ecx |  4
3572 3572          pushl   %edx                            / | %edx |  0
3573 3573  
3574 3574          movl    %esp, %ebx                      / %ebx = current stack pointer
3575 3575  
3576 3576          lea     panic_quiesce, %eax             / %eax = &panic_quiesce
3577 3577          pushl   %eax                            / push &panic_quiesce
3578 3578          call    dtrace_panic_trigger            / %eax = dtrace_panic_trigger()
3579 3579          addl    $4, %esp                        / reset stack pointer
3580 3580          jmp     vpanic_common                   / jump back to common code
3581 3581  
3582 3582          SET_SIZE(dtrace_vpanic)
3583 3583  
3584 3584  #endif  /* __i386 */
3585 3585  #endif  /* __lint */
3586 3586  
3587 3587  #if defined(__lint)
3588 3588  
3589 3589  void
3590 3590  hres_tick(void)
3591 3591  {}
3592 3592  
3593 3593  int64_t timedelta;
3594 3594  hrtime_t hrtime_base;
3595 3595  
3596 3596  #else   /* __lint */
3597 3597  
3598 3598          DGDEF3(timedelta, 8, 8)
3599 3599          .long   0, 0
3600 3600  
3601 3601          /*
3602 3602           * initialized to a non-zero value to make pc_gethrtime()
3603 3603           * work correctly even before the clock is initialized
3604 3604           */
3605 3605          DGDEF3(hrtime_base, 8, 8)
3606 3606          .long   _MUL(NSEC_PER_CLOCK_TICK, 6), 0
3607 3607  
3608 3608          DGDEF3(adj_shift, 4, 4)
3609 3609          .long   ADJ_SHIFT
3610 3610  
3611 3611  #if defined(__amd64)
3612 3612  
3613 3613          ENTRY_NP(hres_tick)
3614 3614          pushq   %rbp
3615 3615          movq    %rsp, %rbp
3616 3616  
3617 3617          /*
3618 3618           * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3619 3619           * hres_last_tick can only be modified while holding CLOCK_LOCK).
3620 3620           * At worst, performing this now instead of under CLOCK_LOCK may
3621 3621           * introduce some jitter in pc_gethrestime().
3622 3622           */
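                   /*
                    * The .CL1/.CL2/.CL3 sequence below is a test-and-test-and-set
                    * spinlock on the low byte of hres_lock.  A C sketch, assuming
                    * the usual atomic_swap_8()/SMT_PAUSE() primitives (the kernel
                    * does this inline rather than calling them):
                    *
                    *      while (atomic_swap_8((uint8_t *)&hres_lock, 0xff) != 0)
                    *              while (*(volatile uint8_t *)&hres_lock != 0)
                    *                      SMT_PAUSE();
                    *
                    * Releasing with "incl hres_lock" below both clears the low
                    * lock byte (0xff + 1 carries out of it) and advances the
                    * counter that readers use to detect in-progress updates.
                    */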
3623 3623          call    *gethrtimef(%rip)
3624 3624          movq    %rax, %r8
3625 3625  
3626 3626          leaq    hres_lock(%rip), %rax
3627 3627          movb    $-1, %dl
3628 3628  .CL1:
3629 3629          xchgb   %dl, (%rax)
3630 3630          testb   %dl, %dl
3631 3631          jz      .CL3                    /* got it */
3632 3632  .CL2:
3633 3633          cmpb    $0, (%rax)              /* possible to get lock? */
3634 3634          pause
3635 3635          jne     .CL2
3636 3636          jmp     .CL1                    /* yes, try again */
3637 3637  .CL3:
3638 3638          /*
3639 3639           * compute the interval since last time hres_tick was called
3640 3640           * and adjust hrtime_base and hrestime accordingly
3641 3641           * hrtime_base is an 8 byte value (in nsec), hrestime is
3642 3642           * a timestruc_t (sec, nsec)
3643 3643           */
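                   /*
                    * In C terms, the code below amounts to this sketch
                    * (now is the gethrtimef() value obtained above):
                    *
                    *      interval = now - hres_last_tick;
                    *      hrtime_base += interval;
                    *      hrestime.tv_nsec += interval;
                    *      hres_last_tick = now;
                    *      __adj_hrestime();       (applies timedelta, below)
                    */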
3644 3644          leaq    hres_last_tick(%rip), %rax
3645 3645          movq    %r8, %r11
3646 3646          subq    (%rax), %r8
3647 3647          addq    %r8, hrtime_base(%rip)  /* add interval to hrtime_base */
3648 3648          addq    %r8, hrestime+8(%rip)   /* add interval to hrestime.tv_nsec */
3649 3649          /*
3650 3650           * Now that we have CLOCK_LOCK, we can update hres_last_tick
3651 3651           */     
3652 3652          movq    %r11, (%rax)    
3653 3653  
3654 3654          call    __adj_hrestime
3655 3655  
3656 3656          /*
3657 3657           * release the hres_lock
3658 3658           */
3659 3659          incl    hres_lock(%rip)
3660 3660          leave
3661 3661          ret
3662 3662          SET_SIZE(hres_tick)
3663 3663          
3664 3664  #elif defined(__i386)
3665 3665  
3666 3666          ENTRY_NP(hres_tick)
3667 3667          pushl   %ebp
3668 3668          movl    %esp, %ebp
3669 3669          pushl   %esi
3670 3670          pushl   %ebx
3671 3671  
3672 3672          /*
3673 3673           * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3674 3674           * hres_last_tick can only be modified while holding CLOCK_LOCK).
3675 3675           * At worst, performing this now instead of under CLOCK_LOCK may
3676 3676           * introduce some jitter in pc_gethrestime().
3677 3677           */
3678 3678          call    *gethrtimef
3679 3679          movl    %eax, %ebx
3680 3680          movl    %edx, %esi
3681 3681  
3682 3682          movl    $hres_lock, %eax
3683 3683          movl    $-1, %edx
3684 3684  .CL1:
3685 3685          xchgb   %dl, (%eax)
3686 3686          testb   %dl, %dl
3687 3687          jz      .CL3                    / got it
3688 3688  .CL2:
3689 3689          cmpb    $0, (%eax)              / possible to get lock?
3690 3690          pause
3691 3691          jne     .CL2
3692 3692          jmp     .CL1                    / yes, try again
3693 3693  .CL3:
3694 3694          /*
3695 3695           * compute the interval since last time hres_tick was called
3696 3696           * and adjust hrtime_base and hrestime accordingly
3697 3697           * hrtime_base is an 8 byte value (in nsec), hrestime is
3698 3698           * timestruc_t (sec, nsec)
3699 3699           * timestruc_t (sec, nsec)
3700 3700           */
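                   / The 64-bit arithmetic below is done in 32-bit halves:
                   / subl/sbbl subtract the low words and then the high words
                   / minus the borrow, and addl/adcl add the low words and then
                   / the high words plus the carry.  In C terms (sketch):
                   /
                   /       interval_lo = now_lo - last_lo;          (subl)
                   /       interval_hi = now_hi - last_hi - borrow; (sbbl)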
3701 3701          lea     hres_last_tick, %eax
3702 3702  
3703 3703          movl    %ebx, %edx
3704 3704          movl    %esi, %ecx
3705 3705  
3706 3706          subl    (%eax), %edx
3707 3707          sbbl    4(%eax), %ecx
3708 3708  
3709 3709          addl    %edx, hrtime_base       / add interval to hrtime_base
3710 3710          adcl    %ecx, hrtime_base+4
3711 3711  
3712 3712          addl    %edx, hrestime+4        / add interval to hrestime.tv_nsec
3713 3713  
3714 3714          /
3715 3715          / Now that we have CLOCK_LOCK, we can update hres_last_tick.
3716 3716          /
3717 3717          movl    %ebx, (%eax)
3718 3718          movl    %esi,  4(%eax)
3719 3719  
3720 3720          / get hrestime at this moment. used as base for pc_gethrestime
3721 3721          /
3722 3722          / Apply adjustment, if any
3723 3723          /
3724 3724          / #define HRES_ADJ      (NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
3725 3725          / (max_hres_adj)
3726 3726          /
3727 3727          / void
3728 3728          / adj_hrestime()
3729 3729          / {
3730 3730          /       long long adj;
3731 3731          /
3732 3732          /       if (hrestime_adj == 0)
3733 3733          /               adj = 0;
3734 3734          /       else if (hrestime_adj > 0) {
3735 3735          /               if (hrestime_adj < HRES_ADJ)
3736 3736          /                       adj = hrestime_adj;
3737 3737          /               else
3738 3738          /                       adj = HRES_ADJ;
3739 3739          /       }
3740 3740          /       else {
3741 3741          /               if (hrestime_adj < -(HRES_ADJ))
3742 3742          /                       adj = -(HRES_ADJ);
3743 3743          /               else
3744 3744          /                       adj = hrestime_adj;
3745 3745          /       }
3746 3746          /
3747 3747          /       timedelta -= adj;
3748 3748          /       hrestime_adj = timedelta;
3749 3749          /       hrestime.tv_nsec += adj;
3750 3750          /
3751 3751          /       while (hrestime.tv_nsec >= NANOSEC) {
3752 3752          /               one_sec++;
3753 3753          /               hrestime.tv_sec++;
3754 3754          /               hrestime.tv_nsec -= NANOSEC;
3755 3755          /       }
3756 3756          / }
3757 3757  __adj_hrestime:
3758 3758          movl    hrestime_adj, %esi      / if (hrestime_adj == 0)
3759 3759          movl    hrestime_adj+4, %edx
3760 3760          andl    %esi, %esi
3761 3761          jne     .CL4                    / no
3762 3762          andl    %edx, %edx
3763 3763          jne     .CL4                    / no
3764 3764          subl    %ecx, %ecx              / yes, adj = 0;
3765 3765          subl    %edx, %edx
3766 3766          jmp     .CL5
3767 3767  .CL4:
3768 3768          subl    %ecx, %ecx
3769 3769          subl    %eax, %eax
3770 3770          subl    %esi, %ecx
3771 3771          sbbl    %edx, %eax
3772 3772          andl    %eax, %eax              / if (hrestime_adj > 0)
3773 3773          jge     .CL6
3774 3774  
3775 3775          / In the following comments, HRES_ADJ is used, while in the code
3776 3776          / max_hres_adj is used.
3777 3777          /
3778 3778          / The test for "hrestime_adj < HRES_ADJ" is complicated because
3779 3779          / hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
3780 3780          / on the logical equivalence of:
3781 3781          /
3782 3782          /       !(hrestime_adj < HRES_ADJ)
3783 3783          /
3784 3784          / and the two step sequence:
3785 3785          /
3786 3786          /       (HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
3787 3787          /
3788 3788  / which computes whether or not the least significant 32 bits
3789 3789  / of hrestime_adj are greater than HRES_ADJ, followed by:
3790 3790          /
3791 3791          /       Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
3792 3792          /
3793 3793          / which generates a carry whenever step 1 is true or the most
3794 3794          / significant long of the longlong hrestime_adj is non-zero.
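           / A worked example with HRES_ADJ = 5:  for hrestime_adj = 7
           / (lsw 7, msw 0), step 1 computes 5 - 7 and borrows (CF = 1),
           / and step 2 computes 0 + (-1) + 1, which carries, so adj is
           / clamped to HRES_ADJ.  For hrestime_adj = 3, step 1 computes
           / 5 - 3 with no borrow, step 2 computes 0 + (-1) + 0 with no
           / carry, and adj = hrestime_adj.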
3795 3795  
3796 3796          movl    max_hres_adj, %ecx      / hrestime_adj is positive
3797 3797          subl    %esi, %ecx
3798 3798          movl    %edx, %eax
3799 3799          adcl    $-1, %eax
3800 3800          jnc     .CL7
3801 3801          movl    max_hres_adj, %ecx      / adj = HRES_ADJ;
3802 3802          subl    %edx, %edx
3803 3803          jmp     .CL5
3804 3804  
3805 3805          / The following computation is similar to the one above.
3806 3806          /
3807 3807          / The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
3808 3808          / hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
3809 3809          / on the logical equivalence of:
3810 3810          /
3811 3811          /       (hrestime_adj > -HRES_ADJ)
3812 3812          /
3813 3813          / and the two step sequence:
3814 3814          /
3815 3815          /       (HRES_ADJ + lsw(hrestime_adj)) generates a Carry
3816 3816          /
3817 3817  / which means the least significant 32 bits of hrestime_adj are
3818 3818  / greater than -HRES_ADJ, followed by:
3819 3819          /
3820 3820          /       Previous Carry + 0 + msw(hrestime_adj) generates a Carry
3821 3821          /
3822 3822          / which generates a carry only when step 1 is true and the most
3823 3823          / significant long of the longlong hrestime_adj is -1.
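           / A worked example with HRES_ADJ = 5:  for hrestime_adj = -3
           / (lsw 0xfffffffd, msw 0xffffffff), step 1 computes
           / 0xfffffffd + 5, which carries, and step 2 computes
           / 0xffffffff + 0 + 1, which also carries, so adj =
           / hrestime_adj.  For hrestime_adj = -7 (lsw 0xfffffff9),
           / step 1 computes 0xfffffff9 + 5 with no carry, so adj is
           / clamped to -(HRES_ADJ).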
3824 3824  
3825 3825  .CL6:                                   / hrestime_adj is negative
3826 3826          movl    %esi, %ecx
3827 3827          addl    max_hres_adj, %ecx
3828 3828          movl    %edx, %eax
3829 3829          adcl    $0, %eax
3830 3830          jc      .CL7
3831 3831          xor     %ecx, %ecx
3832 3832          subl    max_hres_adj, %ecx      / adj = -(HRES_ADJ);
3833 3833          movl    $-1, %edx
3834 3834          jmp     .CL5
3835 3835  .CL7:
3836 3836          movl    %esi, %ecx              / adj = hrestime_adj;
3837 3837  .CL5:
3838 3838          movl    timedelta, %esi
3839 3839          subl    %ecx, %esi
3840 3840          movl    timedelta+4, %eax
3841 3841          sbbl    %edx, %eax
3842 3842          movl    %esi, timedelta
3843 3843          movl    %eax, timedelta+4       / timedelta -= adj;
3844 3844          movl    %esi, hrestime_adj
3845 3845          movl    %eax, hrestime_adj+4    / hrestime_adj = timedelta;
3846 3846          addl    hrestime+4, %ecx
3847 3847  
3848 3848          movl    %ecx, %eax              / eax = tv_nsec
3849 3849  1:
3850 3850          cmpl    $NANOSEC, %eax          / if ((unsigned long)tv_nsec >= NANOSEC)
3851 3851          jb      .CL8                    / no
3852 3852          incl    one_sec                 / yes,  one_sec++;
3853 3853          incl    hrestime                / hrestime.tv_sec++;
3854 3854          addl    $-NANOSEC, %eax         / tv_nsec -= NANOSEC
3855 3855          jmp     1b                      / check for more seconds
3856 3856  
3857 3857  .CL8:
3858 3858          movl    %eax, hrestime+4        / store final into hrestime.tv_nsec
3859 3859          incl    hres_lock               / release the hres_lock
3860 3860  
3861 3861          popl    %ebx
3862 3862          popl    %esi
3863 3863          leave
3864 3864          ret
3865 3865          SET_SIZE(hres_tick)
3866 3866  
3867 3867  #endif  /* __i386 */
3868 3868  #endif  /* __lint */
3869 3869  
3870 3870  /*
3871 3871   * void prefetch_smap_w(void *)
3872 3872   *
3873 3873   * Prefetch ahead within a linear list of smap structures.
3874 3874   * Not implemented for ia32.  Stub for compatibility.
3875 3875   */
3876 3876  
3877 3877  #if defined(__lint)
3878 3878  
3879 3879  /*ARGSUSED*/
3880 3880  void prefetch_smap_w(void *smp)
3881 3881  {}
3882 3882  
3883 3883  #else   /* __lint */
3884 3884  
3885 3885          ENTRY(prefetch_smap_w)
3886 3886          rep;    ret     /* use 2 byte return instruction when branch target */
3887 3887                          /* AMD Software Optimization Guide - Section 6.2 */
3888 3888          SET_SIZE(prefetch_smap_w)
3889 3889  
3890 3890  #endif  /* __lint */
3891 3891  
3892 3892  /*
3893 3893   * prefetch_page_r(page_t *)
3894 3894   * issue prefetch instructions for a page_t
3895 3895   */
3896 3896  #if defined(__lint)
3897 3897  
3898 3898  /*ARGSUSED*/
3899 3899  void
3900 3900  prefetch_page_r(void *pp)
3901 3901  {}
3902 3902  
3903 3903  #else   /* __lint */
3904 3904  
3905 3905          ENTRY(prefetch_page_r)
3906 3906          rep;    ret     /* use 2 byte return instruction when branch target */
3907 3907                          /* AMD Software Optimization Guide - Section 6.2 */
3908 3908          SET_SIZE(prefetch_page_r)
3909 3909  
3910 3910  #endif  /* __lint */
3911 3911  
3912 3912  #if defined(__lint)
3913 3913  
3914 3914  /*ARGSUSED*/
3915 3915  int
3916 3916  bcmp(const void *s1, const void *s2, size_t count)
3917 3917  { return (0); }
3918 3918  
3919 3919  #else   /* __lint */
3920 3920  
3921 3921  #if defined(__amd64)
3922 3922  
3923 3923          ENTRY(bcmp)
3924 3924          pushq   %rbp
3925 3925          movq    %rsp, %rbp
3926 3926  #ifdef DEBUG
3927 3927          testq   %rdx,%rdx
3928 3928          je      1f
3929 3929          movq    postbootkernelbase(%rip), %r11
3930 3930          cmpq    %r11, %rdi
3931 3931          jb      0f
3932 3932          cmpq    %r11, %rsi
3933 3933          jnb     1f
3934 3934  0:      leaq    .bcmp_panic_msg(%rip), %rdi
3935 3935          xorl    %eax, %eax
3936 3936          call    panic
3937 3937  1:
3938 3938  #endif  /* DEBUG */
3939 3939          call    memcmp
3940 3940          testl   %eax, %eax
3941 3941          setne   %dl
3942 3942          leave
3943 3943          movzbl  %dl, %eax
3944 3944          ret
3945 3945          SET_SIZE(bcmp)
3946 3946          
3947 3947  #elif defined(__i386)
3948 3948          
3949 3949  #define ARG_S1          8
3950 3950  #define ARG_S2          12
3951 3951  #define ARG_LENGTH      16
3952 3952  
3953 3953          ENTRY(bcmp)
3954 3954          pushl   %ebp
3955 3955          movl    %esp, %ebp      / create new stack frame
3956 3956  #ifdef DEBUG
3957 3957          cmpl    $0, ARG_LENGTH(%ebp)
3958 3958          je      1f
3959 3959          movl    postbootkernelbase, %eax
3960 3960          cmpl    %eax, ARG_S1(%ebp)
3961 3961          jb      0f
3962 3962          cmpl    %eax, ARG_S2(%ebp)
3963 3963          jnb     1f
3964 3964  0:      pushl   $.bcmp_panic_msg
3965 3965          call    panic
3966 3966  1:
3967 3967  #endif  /* DEBUG */
3968 3968  
3969 3969          pushl   %edi            / save register variable
3970 3970          movl    ARG_S1(%ebp), %eax      / %eax = address of string 1
3971 3971          movl    ARG_S2(%ebp), %ecx      / %ecx = address of string 2
3972 3972          cmpl    %eax, %ecx      / if the same string
3973 3973          je      .equal          / goto .equal
3974 3974          movl    ARG_LENGTH(%ebp), %edi  / %edi = length in bytes
3975 3975          cmpl    $4, %edi        / if %edi < 4
3976 3976          jb      .byte_check     / goto .byte_check
3977 3977          .align  4
3978 3978  .word_loop:
3979 3979          movl    (%ecx), %edx    / move 1 word from (%ecx) to %edx
3980 3980          leal    -4(%edi), %edi  / %edi -= 4
3981 3981          cmpl    (%eax), %edx    / compare 1 word from (%eax) with %edx
3982 3982          jne     .word_not_equal / if not equal, goto .word_not_equal
3983 3983          leal    4(%ecx), %ecx   / %ecx += 4 (next word)
3984 3984          leal    4(%eax), %eax   / %eax += 4 (next word)
3985 3985          cmpl    $4, %edi        / if %edi >= 4
3986 3986          jae     .word_loop      / goto .word_loop
3987 3987  .byte_check:
3988 3988          cmpl    $0, %edi        / if %edi == 0
3989 3989          je      .equal          / goto .equal
3990 3990          jmp     .byte_loop      / goto .byte_loop (checks in bytes)
3991 3991  .word_not_equal:
3992 3992          leal    4(%edi), %edi   / %edi += 4 (post-decremented)
3993 3993          .align  4
3994 3994  .byte_loop:
3995 3995          movb    (%ecx), %dl     / move 1 byte from (%ecx) to %dl
3996 3996          cmpb    %dl, (%eax)     / compare %dl with 1 byte from (%eax)
3997 3997          jne     .not_equal      / if not equal, goto .not_equal
3998 3998          incl    %ecx            / %ecx++ (next byte)
3999 3999          incl    %eax            / %eax++ (next byte)
4000 4000          decl    %edi            / %edi--
4001 4001          jnz     .byte_loop      / if not zero, goto .byte_loop
4002 4002  .equal:
4003 4003          xorl    %eax, %eax      / %eax = 0
4004 4004          popl    %edi            / restore register variable
4005 4005          leave                   / restore old stack frame
4006 4006          ret                     / return (0)
4007 4007          .align  4
4008 4008  .not_equal:
4009 4009          movl    $1, %eax        / return 1
4010 4010          popl    %edi            / restore register variable
4011 4011          leave                   / restore old stack frame
4012 4012          ret                     / return (1)
4013 4013          SET_SIZE(bcmp)
4014 4014  
4015 4015  #endif  /* __i386 */
4016 4016  
4017 4017  #ifdef DEBUG
4018 4018          .text
4019 4019  .bcmp_panic_msg:
4020 4020          .string "bcmp: arguments below kernelbase"
4021 4021  #endif  /* DEBUG */
4022 4022  
4023 4023  #endif  /* __lint */
4024 4024  
4025 4025  #if defined(__lint)
4026 4026  
4027 4027  uint_t
4028 4028  bsrw_insn(uint16_t mask)
4029 4029  {
4030 4030          uint_t index = sizeof (mask) * NBBY - 1;
4031 4031  
4032 4032          while ((mask & (1 << index)) == 0)
4033 4033                  index--;
4034 4034          return (index);
4035 4035  }
4036 4036  
4037 4037  #else   /* __lint */
4038 4038  
4039 4039  #if defined(__amd64)
4040 4040  
4041 4041          ENTRY_NP(bsrw_insn)
4042 4042          xorl    %eax, %eax
4043 4043          bsrw    %di, %ax
4044 4044          ret
4045 4045          SET_SIZE(bsrw_insn)
4046 4046  
4047 4047  #elif defined(__i386)
4048 4048  
4049 4049          ENTRY_NP(bsrw_insn)
4050 4050          movw    4(%esp), %cx
4051 4051          xorl    %eax, %eax
4052 4052          bsrw    %cx, %ax
4053 4053          ret
4054 4054          SET_SIZE(bsrw_insn)
4055 4055  
4056 4056  #endif  /* __i386 */
4057 4057  #endif  /* __lint */
4058 4058  
4059 4059  #if defined(__lint)
4060 4060  
4061 4061  uint_t
4062 4062  atomic_btr32(uint32_t *pending, uint_t pil)
4063 4063  {
4064 4064          return (*pending &= ~(1 << pil));
4065 4065  }
4066 4066  
4067 4067  #else   /* __lint */
4068 4068  
4069 4069  #if defined(__i386)
4070 4070  
4071 4071          ENTRY_NP(atomic_btr32)
4072 4072          movl    4(%esp), %ecx
4073 4073          movl    8(%esp), %edx
4074 4074          xorl    %eax, %eax
4075 4075          lock
4076 4076          btrl    %edx, (%ecx)
4077 4077          setc    %al
4078 4078          ret
4079 4079          SET_SIZE(atomic_btr32)
4080 4080  
4081 4081  #endif  /* __i386 */
4082 4082  #endif  /* __lint */
4083 4083  
4084 4084  #if defined(__lint)
4085 4085  
4086 4086  /*ARGSUSED*/
4087 4087  void
4088 4088  switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
4089 4089              uint_t arg2)
4090 4090  {}
4091 4091  
4092 4092  #else   /* __lint */
4093 4093  
4094 4094  #if defined(__amd64)
4095 4095  
4096 4096          ENTRY_NP(switch_sp_and_call)
4097 4097          pushq   %rbp
4098 4098          movq    %rsp, %rbp              /* set up stack frame */
4099 4099          movq    %rdi, %rsp              /* switch stack pointer */
4100 4100          movq    %rdx, %rdi              /* pass func arg 1 */
4101 4101          movq    %rsi, %r11              /* save function to call */
4102 4102          movq    %rcx, %rsi              /* pass func arg 2 */
4103 4103          call    *%r11                   /* call function */
4104 4104          leave                           /* restore stack */
4105 4105          ret
4106 4106          SET_SIZE(switch_sp_and_call)
4107 4107  
4108 4108  #elif defined(__i386)
4109 4109  
4110 4110          ENTRY_NP(switch_sp_and_call)
4111 4111          pushl   %ebp
4112 4112          mov     %esp, %ebp              /* set up stack frame */
4113 4113          movl    8(%ebp), %esp           /* switch stack pointer */
4114 4114          pushl   20(%ebp)                /* push func arg 2 */
4115 4115          pushl   16(%ebp)                /* push func arg 1 */
4116 4116          call    *12(%ebp)               /* call function */
4117 4117          addl    $8, %esp                /* pop arguments */
4118 4118          leave                           /* restore stack */
4119 4119          ret
4120 4120          SET_SIZE(switch_sp_and_call)
4121 4121  
4122 4122  #endif  /* __i386 */
4123 4123  #endif  /* __lint */
4124 4124  
4125 4125  #if defined(__lint)
4126 4126  
4127 4127  void
4128 4128  kmdb_enter(void)
4129 4129  {}
4130 4130  
4131 4131  #else   /* __lint */
4132 4132  
4133 4133  #if defined(__amd64)
4134 4134  
4135 4135          ENTRY_NP(kmdb_enter)
4136 4136          pushq   %rbp
4137 4137          movq    %rsp, %rbp
4138 4138  
4139 4139          /*
4140 4140           * Save flags, do a 'cli' then return the saved flags
4141 4141           */
4142 4142          call    intr_clear
4143 4143  
4144 4144          int     $T_DBGENTR
4145 4145  
4146 4146          /*
4147 4147           * Restore the saved flags
4148 4148           */
4149 4149          movq    %rax, %rdi
4150 4150          call    intr_restore
4151 4151  
4152 4152          leave
4153 4153          ret     
4154 4154          SET_SIZE(kmdb_enter)
4155 4155  
4156 4156  #elif defined(__i386)
4157 4157  
4158 4158          ENTRY_NP(kmdb_enter)
4159 4159          pushl   %ebp
4160 4160          movl    %esp, %ebp
4161 4161  
4162 4162          /*
4163 4163           * Save flags, do a 'cli' then return the saved flags
4164 4164           */
4165 4165          call    intr_clear
4166 4166  
4167 4167          int     $T_DBGENTR
4168 4168  
4169 4169          /*
4170 4170           * Restore the saved flags
4171 4171           */
4172 4172          pushl   %eax
4173 4173          call    intr_restore
4174 4174          addl    $4, %esp
4175 4175  
4176 4176          leave
4177 4177          ret     
4178 4178          SET_SIZE(kmdb_enter)
4179 4179  
4180 4180  #endif  /* __i386 */
4181 4181  #endif  /* __lint */
4182 4182  
4183 4183  #if defined(__lint)
4184 4184  
4185 4185  void
4186 4186  return_instr(void)
4187 4187  {}
4188 4188  
4189 4189  #else   /* __lint */
4190 4190  
4191 4191          ENTRY_NP(return_instr)
4192 4192          rep;    ret     /* use 2 byte instruction when branch target */
4193 4193                          /* AMD Software Optimization Guide - Section 6.2 */
4194 4194          SET_SIZE(return_instr)
4195 4195  
4196 4196  #endif  /* __lint */
4197 4197  
4198 4198  #if defined(__lint)
4199 4199  
4200 4200  ulong_t
4201 4201  getflags(void)
4202 4202  {
4203 4203          return (0);
4204 4204  }
4205 4205  
4206 4206  #else   /* __lint */
4207 4207  
4208 4208  #if defined(__amd64)
4209 4209  
4210 4210          ENTRY(getflags)
4211 4211          pushfq
4212 4212          popq    %rax
4213 4213  #if defined(__xpv)
4214 4214          CURTHREAD(%rdi)
4215 4215          KPREEMPT_DISABLE(%rdi)
4216 4216          /*
4217 4217           * Synthesize the PS_IE bit from the event mask bit
4218 4218           */
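                   /*
                    * In C terms (sketch): flags &= ~PS_IE, then flags |= PS_IE
                    * only if the vcpu's event-delivery (upcall) mask is clear;
                    * under Xen the virtual interrupt-enable state lives in the
                    * event mask rather than in the flags register.
                    */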
4219 4219          CURVCPU(%r11)
4220 4220          andq    $_BITNOT(PS_IE), %rax
4221 4221          XEN_TEST_UPCALL_MASK(%r11)
4222 4222          jnz     1f
4223 4223          orq     $PS_IE, %rax
4224 4224  1:
4225 4225          KPREEMPT_ENABLE_NOKP(%rdi)
4226 4226  #endif
4227 4227          ret
4228 4228          SET_SIZE(getflags)
4229 4229  
4230 4230  #elif defined(__i386)
4231 4231  
4232 4232          ENTRY(getflags)
4233 4233          pushfl
4234 4234          popl    %eax
4235 4235  #if defined(__xpv)
4236 4236          CURTHREAD(%ecx)
4237 4237          KPREEMPT_DISABLE(%ecx)
4238 4238          /*
4239 4239           * Synthesize the PS_IE bit from the event mask bit
4240 4240           */
4241 4241          CURVCPU(%edx)
4242 4242          andl    $_BITNOT(PS_IE), %eax
4243 4243          XEN_TEST_UPCALL_MASK(%edx)
4244 4244          jnz     1f
4245 4245          orl     $PS_IE, %eax
4246 4246  1:
4247 4247          KPREEMPT_ENABLE_NOKP(%ecx)
4248 4248  #endif
4249 4249          ret
4250 4250          SET_SIZE(getflags)
4251 4251  
4252 4252  #endif  /* __i386 */
4253 4253  
4254 4254  #endif  /* __lint */
4255 4255  
4256 4256  #if defined(__lint)
4257 4257  
4258 4258  ftrace_icookie_t
4259 4259  ftrace_interrupt_disable(void)
4260 4260  { return (0); }
4261 4261  
4262 4262  #else   /* __lint */
4263 4263  
4264 4264  #if defined(__amd64)
4265 4265  
4266 4266          ENTRY(ftrace_interrupt_disable)
4267 4267          pushfq
4268 4268          popq    %rax
4269 4269          CLI(%rdx)
4270 4270          ret
4271 4271          SET_SIZE(ftrace_interrupt_disable)
4272 4272  
4273 4273  #elif defined(__i386)
4274 4274                  
4275 4275          ENTRY(ftrace_interrupt_disable)
4276 4276          pushfl
4277 4277          popl    %eax
4278 4278          CLI(%edx)
4279 4279          ret
4280 4280          SET_SIZE(ftrace_interrupt_disable)
4281 4281  
4282 4282  #endif  /* __i386 */    
4283 4283  #endif  /* __lint */
4284 4284  
4285 4285  #if defined(__lint)
4286 4286  
4287 4287  /*ARGSUSED*/
4288 4288  void
4289 4289  ftrace_interrupt_enable(ftrace_icookie_t cookie)
4290 4290  {}
4291 4291  
4292 4292  #else   /* __lint */
4293 4293  
4294 4294  #if defined(__amd64)
4295 4295  
4296 4296          ENTRY(ftrace_interrupt_enable)
4297 4297          pushq   %rdi
4298 4298          popfq
4299 4299          ret
4300 4300          SET_SIZE(ftrace_interrupt_enable)
4301 4301  
4302 4302  #elif defined(__i386)
4303 4303                  
4304 4304          ENTRY(ftrace_interrupt_enable)
4305 4305          movl    4(%esp), %eax
4306 4306          pushl   %eax
4307 4307          popfl
4308 4308          ret
4309 4309          SET_SIZE(ftrace_interrupt_enable)
4310 4310  
4311 4311  #endif  /* __i386 */
4312 4312  #endif  /* __lint */
4313 4313  
4314 4314  #if defined (__lint)
4315 4315  
4316 4316  /*ARGSUSED*/
4317 4317  void
4318 4318  clflush_insn(caddr_t addr)
4319 4319  {}
4320 4320  
4321 4321  #else /* __lint */
4322 4322  
4323 4323  #if defined (__amd64)
4324 4324          ENTRY(clflush_insn)
4325 4325          clflush (%rdi)
4326 4326          ret
4327 4327          SET_SIZE(clflush_insn)
4328 4328  #elif defined (__i386)
4329 4329          ENTRY(clflush_insn)
4330 4330          movl    4(%esp), %eax
4331 4331          clflush (%eax)
4332 4332          ret
4333 4333          SET_SIZE(clflush_insn)
4334 4334  
4335 4335  #endif /* __i386 */
4336 4336  #endif /* __lint */
4337 4337  
4338 4338  #if defined (__lint)
4339 4339  /*ARGSUSED*/
4340 4340  void
4341 4341  mfence_insn(void)
4342 4342  {}
4343 4343  
4344 4344  #else /* __lint */
4345 4345  
4346 4346  #if defined (__amd64)
4347 4347          ENTRY(mfence_insn)
4348 4348          mfence
4349 4349          ret
4350 4350          SET_SIZE(mfence_insn)
4351 4351  #elif defined (__i386)
4352 4352          ENTRY(mfence_insn)
4353 4353          mfence
4354 4354          ret
4355 4355          SET_SIZE(mfence_insn)
4356 4356  
4357 4357  #endif /* __i386 */
4358 4358  #endif /* __lint */
4359 4359  
4360 4360  /*
4361 4361   * VMware implements an I/O port that programs can query to detect if software
4362 4362   * is running in a VMware hypervisor. This hypervisor port behaves differently
4363 4363   * depending on magic values in certain registers and modifies some registers
4364 4364   * as a side effect.
4365 4365   *
4366 4366   * References: http://kb.vmware.com/kb/1009458 
4367 4367   */
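           /*
            * A usage sketch (cmd is whichever backdoor command the caller
            * needs; see the KB article above for the protocol):
            *
            *      uint32_t regs[4];
            *
            *      vmware_port(cmd, regs);
            *      (regs[0..3] now hold the returned %eax, %ebx, %ecx, %edx)
            *
            * The routine loads %eax with VMWARE_HVMAGIC, %ebx with
            * 0xffffffff, %ecx with cmd and %edx with VMWARE_HVPORT,
            * executes inl (%dx), and copies the four result registers
            * back into regs[] for the caller to interpret.
            */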
4368 4368  
4369 4369  #if defined(__lint)
4370 4370  
4371 4371  /* ARGSUSED */
4372 4372  void
4373 4373  vmware_port(int cmd, uint32_t *regs) { return; }
4374 4374  
4375 4375  #else
4376 4376  
4377 4377  #if defined(__amd64)
4378 4378  
4379 4379          ENTRY(vmware_port)
4380 4380          pushq   %rbx
4381 4381          movl    $VMWARE_HVMAGIC, %eax
4382 4382          movl    $0xffffffff, %ebx
4383 4383          movl    %edi, %ecx
4384 4384          movl    $VMWARE_HVPORT, %edx
4385 4385          inl     (%dx)
4386 4386          movl    %eax, (%rsi)
4387 4387          movl    %ebx, 4(%rsi)
4388 4388          movl    %ecx, 8(%rsi)
4389 4389          movl    %edx, 12(%rsi)
4390 4390          popq    %rbx
4391 4391          ret
4392 4392          SET_SIZE(vmware_port)
4393 4393  
4394 4394  #elif defined(__i386)
4395 4395  
4396 4396          ENTRY(vmware_port)
4397 4397          pushl   %ebx
4398 4398          pushl   %esi
4399 4399          movl    $VMWARE_HVMAGIC, %eax
4400 4400          movl    $0xffffffff, %ebx
4401 4401          movl    12(%esp), %ecx
4402 4402          movl    $VMWARE_HVPORT, %edx
4403 4403          inl     (%dx)
4404 4404          movl    16(%esp), %esi
4405 4405          movl    %eax, (%esi)
4406 4406          movl    %ebx, 4(%esi)
4407 4407          movl    %ecx, 8(%esi)
4408 4408          movl    %edx, 12(%esi)
4409 4409          popl    %esi
4410 4410          popl    %ebx
4411 4411          ret
4412 4412          SET_SIZE(vmware_port)
4413 4413  
4414 4414  #endif /* __i386 */
4415 4415  #endif /* __lint */
  