--- old/usr/src/lib/commpage/common/cp_main.c
+++ new/usr/src/lib/commpage/common/cp_main.c
   1    1  /*
   2    2   * This file and its contents are supplied under the terms of the
   3    3   * Common Development and Distribution License ("CDDL"), version 1.0.
   4    4   * You may only use this file in accordance with the terms of version
   5    5   * 1.0 of the CDDL.
   6    6   *
   7    7   * A full copy of the text of the CDDL should have accompanied this
   8    8   * source.  A copy of the CDDL is also available via the Internet at
   9    9   * http://www.illumos.org/license/CDDL.
  10   10   */
  11   11  
  12   12  /*
  13   13   * Copyright 2016 Joyent, Inc.
  14   14   */
  15   15  
  16   16  #include <sys/comm_page.h>
  17   17  #include <sys/tsc.h>
  18   18  
  19   19  
  20   20  /*
   21   21   * Determine whether querying the clock via the comm page is possible.
  22   22   */
  23   23  int
  24   24  __cp_can_gettime(comm_page_t *cp)
  25   25  {
  26   26          switch (cp->cp_tsc_type) {
  27   27          case TSC_TSCP:
  28   28          case TSC_RDTSC_MFENCE:
  29   29          case TSC_RDTSC_LFENCE:
  30   30          case TSC_RDTSC_CPUID:
  31   31                  return (1);
  32   32          default:
  33   33                  break;
  34   34          }
  35   35          return (0);
  36   36  }
  37   37  
  38   38  #ifdef __amd64
  39   39  
  40   40  /*
  41   41   * The functions used for calculating time (both monotonic and wall-clock) are
  42   42   * implemented in assembly on amd64.  This is primarily for stack conservation.
  43   43   */
  44   44  
  45   45  #else /* i386 below */
  46   46  
  47   47  /*
  48   48   * ASM-defined functions.
  49   49   */
  50   50  extern hrtime_t __cp_tsc_read(comm_page_t *);
  51   51  
  52   52  /*
   53   53   * These are cloned from the TSC and time-related code in the kernel.  They
   54   54   * should be kept in sync in case the source values are changed.
  55   55   */
  56   56  #define NSEC_SHIFT      5
  57   57  #define ADJ_SHIFT       4
  58   58  #define NANOSEC         1000000000LL
  59   59  
  60   60  #define TSC_CONVERT_AND_ADD(tsc, hrt, scale) do {               \
  61   61          uint32_t *_l = (uint32_t *)&(tsc);                      \
  62   62          uint64_t sc = (uint32_t)(scale);                        \
  63   63          (hrt) += (uint64_t)(_l[1] * sc) << NSEC_SHIFT;          \
  64   64          (hrt) += (uint64_t)(_l[0] * sc) >> (32 - NSEC_SHIFT);   \
  65   65  } while (0)
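            /*
             * A sketch of the arithmetic above, assuming cp_nsec_scale is derived as
             * in the kernel's timestamp code (roughly
             * (NANOSEC << (32 - NSEC_SHIFT)) / cpu_freq_hz), so that
             *
             *   nsec = (tsc * scale) >> (32 - NSEC_SHIFT)
             *
             * Splitting tsc into its 32-bit halves hi = _l[1] and lo = _l[0]:
             *
             *   ((hi << 32) + lo) * scale >> (32 - NSEC_SHIFT)
             *     = ((hi * scale) << NSEC_SHIFT) + ((lo * scale) >> (32 - NSEC_SHIFT))
             *
             * which is exactly the pair of additions the macro performs, keeping
             * every intermediate product within 64 bits.
             */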
  66   66  
  67   67  /*
  68   68   * Userspace version of tsc_gethrtime.
  69   69   * See: uts/i86pc/os/timestamp.c
  70   70   */
  71   71  hrtime_t
  72   72  __cp_gethrtime(comm_page_t *cp)
  73   73  {
  74   74          uint32_t old_hres_lock;
  75   75          hrtime_t tsc, hrt, tsc_last;
  76   76  
  77   77          /*
  78   78           * Several precautions must be taken when collecting the data necessary
  79   79           * to perform an accurate gethrtime calculation.
  80   80           *
  81   81           * While much of the TSC state stored in the comm page is unchanging
  82   82           * after boot, portions of it are periodically updated during OS ticks.
   83   83           * Changes to hres_lock during the copy indicate a potentially
   84   84           * inconsistent snapshot, necessitating a loop.
  85   85           *
  86   86           * Even more complicated is the handling for TSCs which require sync
  87   87           * offsets between different CPUs.  Since userspace lacks the luxury of
  88   88           * disabling interrupts, a validation loop checking for CPU migrations
  89   89           * is used.  Pathological scheduling could, in theory, "outwit"
  90   90           * this check.  Such a possibility is considered an acceptable risk.
  91   91           *
  92   92           */
  93   93          do {
  94   94                  old_hres_lock = cp->cp_hres_lock;
  95   95                  tsc_last = cp->cp_tsc_last;
  96   96                  hrt = cp->cp_tsc_hrtime_base;
  97   97                  tsc = __cp_tsc_read(cp);
  98   98          } while ((old_hres_lock & ~1) != cp->cp_hres_lock);
  99   99  
 100  100          if (tsc >= tsc_last) {
 101  101                  tsc -= tsc_last;
 102  102          } else if (tsc >= tsc_last - (2 * cp->cp_tsc_max_delta)) {
 103  103                  tsc = 0;
 104  104          } else if (tsc > cp->cp_tsc_resume_cap) {
 105  105                  tsc = cp->cp_tsc_resume_cap;
 106  106          }
 107  107          TSC_CONVERT_AND_ADD(tsc, hrt, cp->cp_nsec_scale);
 108  108  
 109  109          return (hrt);
 110  110  }
 111  111  
 112  112  /*
 113  113   * Userspace version of pc_gethrestime.
 114  114   * See: uts/i86pc/os/machdep.c
 115  115   */
 116  116  int
 117  117  __cp_clock_gettime_realtime(comm_page_t *cp, timespec_t *tsp)
 118  118  {
 119  119          int lock_prev, nslt;
 120  120          timespec_t now;
 121  121          int64_t hres_adj;
 122  122  
 123  123  loop:
 124  124          lock_prev = cp->cp_hres_lock;
 125  125          now.tv_sec = cp->cp_hrestime[0];
 126  126          now.tv_nsec = cp->cp_hrestime[1];
 127  127          nslt = (int)(__cp_gethrtime(cp) - cp->cp_hres_last_tick);
 128  128          hres_adj = cp->cp_hrestime_adj;
 129  129          if (nslt < 0) {
 130  130                  /*
  131  131                   * A tick came between sampling hrtime and hres_last_tick; retry.
 132  132                   */
 133  133                  goto loop;
 134  134          }
 135  135          now.tv_nsec += nslt;
 136  136  
 137  137          /*
 138  138           * Apply hres_adj skew, if needed.
 139  139           */
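                    /*
                     * hres_adj carries the clock adjustment still outstanding, in
                     * nanoseconds (e.g. from adjtime(2)-style slewing).  Each pass
                     * applies at most 1/2^ADJ_SHIFT of the time elapsed since the
                     * last tick, clamped so the remaining adjustment is never
                     * overshot.
                     */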
 140  140          if (hres_adj > 0) {
 141  141                  nslt = (nslt >> ADJ_SHIFT);
 142  142                  if (nslt > hres_adj)
 143  143                          nslt = (int)hres_adj;
 144  144                  now.tv_nsec += nslt;
 145  145          } else if (hres_adj < 0) {
 146  146                  nslt = -(nslt >> ADJ_SHIFT);
 147  147                  if (nslt < hres_adj)
 148  148                          nslt = (int)hres_adj;
 149  149                  now.tv_nsec += nslt;
 150  150          }
 151  151  
 152  152          /*
  153  153           * Fold any excess tv_nsec (from the additions above) back into tv_sec.
 154  154           */
 155  155          while ((unsigned long)now.tv_nsec >= NANOSEC) {
 156  156                  now.tv_nsec -= NANOSEC;
 157  157                  now.tv_sec++;
 158  158          }
 159  159  
 160  160          if ((cp->cp_hres_lock & ~1) != lock_prev)
 161  161                  goto loop;
 162  162  
 163  163          *tsp = now;
 164  164          return (0);
 165  165  }
 166  166  
 167  167  /*
 168  168   * The __cp_clock_gettime_monotonic function expects that hrt2ts be present
 169  169   * when the code is finally linked.
 170  170   * (The amd64 version has no such requirement.)
 171  171   */
 172  172  extern void hrt2ts(hrtime_t, timespec_t *);
 173  173  
 174  174  int
 175  175  __cp_clock_gettime_monotonic(comm_page_t *cp, timespec_t *tsp)
 176  176  {
 177  177          hrtime_t hrt;
 178  178  
 179  179          hrt = __cp_gethrtime(cp);
 180  180          hrt2ts(hrt, tsp);
 181  181          return (0);
 182  182  }
 183  183  
 184  184  #endif /* __amd64 */
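
For context, a minimal sketch of how a consumer such as libc's clock_gettime
fast path might dispatch to the entry points above; the fast_clock_gettime
wrapper and its fall-back-to-the-syscall convention are illustrative
assumptions, not part of this file:

#include <sys/comm_page.h>
#include <time.h>

extern int __cp_can_gettime(comm_page_t *);
extern int __cp_clock_gettime_monotonic(comm_page_t *, timespec_t *);
extern int __cp_clock_gettime_realtime(comm_page_t *, timespec_t *);

/*
 * Hypothetical fast path: return -1 whenever the comm page is unavailable or
 * its TSC type is unusable, in which case the caller would fall back to the
 * clock_gettime(2) system call.
 */
static int
fast_clock_gettime(comm_page_t *cp, clockid_t clock, timespec_t *tsp)
{
        if (cp == NULL || __cp_can_gettime(cp) == 0)
                return (-1);

        switch (clock) {
        case CLOCK_MONOTONIC:
                return (__cp_clock_gettime_monotonic(cp, tsp));
        case CLOCK_REALTIME:
                return (__cp_clock_gettime_realtime(cp, tsp));
        default:
                return (-1);
        }
}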
  