#define SYNC_ITERATIONS         10

#define TSC_CONVERT_AND_ADD(tsc, hrt, scale) {              \
        unsigned int *_l = (unsigned int *)&(tsc);          \
        (hrt) += mul32(_l[1], scale) << NSEC_SHIFT;         \
        (hrt) += mul32(_l[0], scale) >> (32 - NSEC_SHIFT);  \
}

#define TSC_CONVERT(tsc, hrt, scale) {                      \
        unsigned int *_l = (unsigned int *)&(tsc);          \
        (hrt) = mul32(_l[1], scale) << NSEC_SHIFT;          \
        (hrt) += mul32(_l[0], scale) >> (32 - NSEC_SHIFT);  \
}
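
/*
 * A sketch of the arithmetic behind these macros, assuming mul32(a, b)
 * returns the full 64-bit product of its two unsigned 32-bit operands.
 * Splitting the TSC value as tsc = hi * 2^32 + lo, the two terms sum to
 *
 *      (hi * scale) << NSEC_SHIFT  +  (lo * scale) >> (32 - NSEC_SHIFT)
 *          == (tsc * scale) >> (32 - NSEC_SHIFT)
 *
 * i.e. a 64x32-bit fixed-point multiply by scale / 2^(32 - NSEC_SHIFT),
 * computed without needing a 96-bit intermediate (the low term only loses
 * the bits shifted out on the right).
 */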

int tsc_master_slave_sync_needed = 1;

typedef struct tsc_sync {
        volatile hrtime_t master_tsc, slave_tsc;
} tsc_sync_t;
static tsc_sync_t *tscp;

static hrtime_t tsc_last_jumped = 0;
static int      tsc_jumped = 0;
static uint32_t tsc_wayback = 0;
/*
 * The cap of 1 second was chosen because tsc_tick() runs once per second,
 * so by the time gethrtime() is called, tsc_last should never be more than
 * 1 second stale.
 */
static hrtime_t tsc_resume_cap_ns = NANOSEC;     /* 1s */

static hrtime_t shadow_tsc_hrtime_base;
static hrtime_t shadow_tsc_last;
static uint_t   shadow_nsec_scale;
static uint32_t shadow_hres_lock;
int get_tsc_ready();

static inline
hrtime_t tsc_protect(hrtime_t a) {
        if (a > tsc_resume_cap) {
 
        hrtime_t hrt;
        ulong_t flags;

        /*
         * Similarly to tsc_gethrtime_delta, we need to disable preemption
         * to prevent migration between the call to tsc_gethrtimeunscaled
         * and adding the CPU's hrtime delta. Note that disabling and
         * reenabling preemption is forbidden here because we may be in the
         * middle of a fast trap. In the amd64 kernel we cannot tolerate
         * preemption during a fast trap. See _update_sregs().
         */

        flags = clear_int_flag();
        hrt = tsc_gethrtimeunscaled() + tsc_sync_tick_delta[CPU->cpu_id];
        restore_int_flag(flags);

        return (hrt);
}

/*
 * TSC Sync Master
 *
 * Typically called on the boot CPU, this attempts to quantify TSC skew between
 * different CPUs.  If an appreciable difference is found, gethrtimef will be
 * changed to point to tsc_gethrtime_delta().
 *
 * Calculating skews is precise only when the master and slave TSCs are read
 * simultaneously; however, there is no algorithm that can read both CPUs in
 * perfect simultaneity.  The proposed algorithm is an approximate method based
 * on the behaviour of cache management.  The slave CPU continuously polls the
 * TSC while reading a global variable updated by the master CPU.  The latest
 * TSC reading is saved when the master's update (forced via mfence) reaches
 * visibility on the slave.  The master will also take a TSC reading
 * immediately following the mfence.
 *
 * While the delay between cache line invalidation on the slave and mfence
 * completion on the master is not repeatable, the error is heuristically
 * assumed to be 1/4th of the write time recorded by the master.  Multiple
 * samples are taken to control for the variance caused by external factors
 * such as bus contention.  Each sample set is independent per-CPU to control
 * for differing memory latency on NUMA systems.
 *
 * TSC sync is disabled under virtualization because the guest's virtual CPUs
 * may run on different physical CPUs over the lifetime of the guest OS.  Any
 * skews calculated for one set of physical CPUs at boot would be invalid once
 * the guest migrates to a different set of physical CPUs later on.
 */
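/*
 * For reference, one iteration of the handshake implemented by this function
 * and tsc_sync_slave() (a summary of the two loop bodies, not an extra step):
 *
 *      master                                  slave
 *      wait for TSC_SYNC_GO                    warm the master_tsc cache line
 *                                              tsc_sync_go = TSC_SYNC_GO
 *      master_tsc = tsc_read()                 spin: s1 = tsc_read() until
 *      mfence (membar_enter)                       master_tsc != 0
 *      mtsc_after = tsc_read()                 slave_tsc = s1
 *      wait for TSC_SYNC_DONE                  tsc_sync_go = TSC_SYNC_DONE
 *      compute write_time and skew,            wait for TSC_SYNC_STOP
 *      clear master_tsc and slave_tsc,
 *      tsc_sync_go = TSC_SYNC_STOP
 */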
void
tsc_sync_master(processorid_t slave)
{
        ulong_t flags, source, min_write_time = ~0UL;
        hrtime_t write_time, mtsc_after, last_delta = 0;
        tsc_sync_t *tsc = tscp;
        int cnt;
        int hwtype;

        hwtype = get_hwenv();
        if (!tsc_master_slave_sync_needed || (hwtype & HW_VIRTUAL) != 0)
                return;

        flags = clear_int_flag();
        source = CPU->cpu_id;

        for (cnt = 0; cnt < SYNC_ITERATIONS; cnt++) {
                while (tsc_sync_go != TSC_SYNC_GO)
                        SMT_PAUSE();

                tsc->master_tsc = tsc_read();
                membar_enter();
                mtsc_after = tsc_read();
                while (tsc_sync_go != TSC_SYNC_DONE)
                        SMT_PAUSE();
                write_time = mtsc_after - tsc->master_tsc;
                if (write_time <= min_write_time) {
                        hrtime_t tdelta;

                        tdelta = tsc->slave_tsc - mtsc_after;
                        if (tdelta < 0)
                                tdelta = -tdelta;
                        /*
                         * If the skew exceeds 1/4th of the measured write
                         * time, subtract that quarter from the master's TSC
                         * value.  This is an estimate of how late the mfence
                         * completion came after the slave noticed the cache
                         * line change.
                         */
                        if (tdelta > (write_time/4)) {
                                tdelta = tsc->slave_tsc -
                                    (mtsc_after - (write_time/4));
                        } else {
                                tdelta = tsc->slave_tsc - mtsc_after;
                        }
                        last_delta = tsc_sync_tick_delta[source] - tdelta;
                        tsc_sync_tick_delta[slave] = last_delta;
                        min_write_time = write_time;
                }

                tsc->master_tsc = tsc->slave_tsc = write_time = 0;
                membar_enter();
                tsc_sync_go = TSC_SYNC_STOP;
        }

        /*
         * Only enable the delta variants of the TSC functions if the measured
         * skew is greater than the fastest write time.
         */
        last_delta = (last_delta < 0) ? -last_delta : last_delta;
        if (last_delta > min_write_time) {
                gethrtimef = tsc_gethrtime_delta;
                gethrtimeunscaledf = tsc_gethrtimeunscaled_delta;
                tsc_ncpu = NCPU;
        }
        restore_int_flag(flags);
}

/*
 * TSC Sync Slave
 *
 * Called by a CPU which has just been onlined.  It is expected that the CPU
 * performing the online operation will call tsc_sync_master().
 *
 * Like tsc_sync_master, this logic is skipped on virtualized platforms.
 */
void
tsc_sync_slave(void)
{
        ulong_t flags;
        hrtime_t s1;
        tsc_sync_t *tsc = tscp;
        int cnt;
        int hwtype;

        hwtype = get_hwenv();
        if (!tsc_master_slave_sync_needed || (hwtype & HW_VIRTUAL) != 0)
                return;

        flags = clear_int_flag();

        for (cnt = 0; cnt < SYNC_ITERATIONS; cnt++) {
                /* Re-fill the cache line */
                s1 = tsc->master_tsc;
                membar_enter();
                tsc_sync_go = TSC_SYNC_GO;
                do {
                        /*
                         * Do not put an SMT_PAUSE here.  If the master and
                         * slave are the same hyper-threaded CPU, we want the
                         * master to yield as quickly as possible to the slave.
                         */
                        s1 = tsc_read();
                } while (tsc->master_tsc == 0);
                tsc->slave_tsc = s1;
                membar_enter();
                tsc_sync_go = TSC_SYNC_DONE;

                while (tsc_sync_go != TSC_SYNC_STOP)
                        SMT_PAUSE();
        }

        restore_int_flag(flags);
}
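
/*
 * A hypothetical sketch of how the two halves pair up during a CPU online
 * operation (illustrative only; the actual callers are not shown here): the
 * CPU driving the online runs the master side against the new CPU's id,
 * while the newly started CPU runs the slave side.
 *
 *      on the onlining CPU:            tsc_sync_master(new_cpu_id);
 *      on the newly onlined CPU:       tsc_sync_slave();
 */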

/*
 * Called once per second on a CPU from the cyclic subsystem's
 * CY_HIGH_LEVEL interrupt.  (No longer restricted to cpu0.)
 */
void
tsc_tick(void)
 
         * Allocate memory for the structure used in the tsc sync logic.
         * This structure should be aligned on a multiple of the cache line
         * size.
         */
        tscp = kmem_zalloc(PAGESIZE, KM_SLEEP);

        /*
         * Convert the TSC resume cap ns value into its unscaled TSC value.
         * See tsc_gethrtime().
         */
        if (tsc_resume_cap == 0)
                TSC_CONVERT(tsc_resume_cap_ns, tsc_resume_cap, nsec_unscale);
}

int
get_tsc_ready()
{
        return (tsc_ready);
}

/*
 * Adjust all the deltas by adding the passed value to the array and activate
 * the "delta" versions of the gethrtime functions.  The adjustment may be
 * negative; this can happen, for example, if the SunOS instance is migrated
 * by a virtual machine manager to a host whose TSC has a higher current
 * value.
 */
void
tsc_adjust_delta(hrtime_t tdelta)
{
        int             i;

        for (i = 0; i < NCPU; i++) {
                tsc_sync_tick_delta[i] += tdelta;
        }

        gethrtimef = tsc_gethrtime_delta;
        gethrtimeunscaledf = tsc_gethrtimeunscaled_delta;
        tsc_ncpu = NCPU;
}

/*
 * Functions to manage TSC and high-res time on suspend and resume.
 */

/* tod_ops from "uts/i86pc/io/todpc_subr.c" */
extern tod_ops_t *tod_ops;

static uint64_t tsc_saved_tsc = 0; /* 1 in 2^64 chance this'll screw up! */
static timestruc_t tsc_saved_ts;
static int      tsc_needs_resume = 0;   /* We only want to do this once. */
int             tsc_delta_onsuspend = 0;
int             tsc_adjust_seconds = 1;
int             tsc_suspend_count = 0;
int             tsc_resume_in_cyclic = 0;

/*
 * Take snapshots of the current time and do any other pre-suspend work.
 */
void
tsc_suspend(void)
{
        /*
         * We need to collect the time at which we suspended here so we know
         * how much should be added during the resume.  This is called by each
         * CPU, so reentry must be properly handled.
         */
        if (tsc_gethrtime_enable) {
                /*
                 * Perform the tsc_read after acquiring the lock to make it as
                 * accurate as possible in the face of contention.
                 */
                mutex_enter(&tod_lock);
                tsc_saved_tsc = tsc_read();
                tsc_saved_ts = TODOP_GET(tod_ops);
                mutex_exit(&tod_lock);
                /* We only want to do this once. */
                if (tsc_needs_resume == 0) {
                        if (tsc_delta_onsuspend) {
                                tsc_adjust_delta(tsc_saved_tsc);
                        } else {
                                tsc_adjust_delta(nsec_scale);
                        }
                        tsc_suspend_count++;
                }
        }

        invalidate_cache();
        tsc_needs_resume = 1;
}

/*
 * Restore all timestamp state based on the snapshots taken at suspend time.
 */
void
tsc_resume(void)
{
        /*
         * We only need to (and want to) do this once.  So let the first
         * caller handle this (we are locked by the cpu lock), as it
         * is preferable that we get the earliest sync.
         */
        if (tsc_needs_resume) {
                /*
                 * If using the TSC, adjust the delta based on how long
                 * we were sleeping (or away).  We also adjust for
                 * migration and a grown TSC.
                 */
                if (tsc_saved_tsc != 0) {
                        timestruc_t     ts;
                        hrtime_t        now, sleep_tsc = 0;
                        int             sleep_sec;
                        extern void     tsc_tick(void);
 