Print this page
NEX-18463 Parallel dump produces corrupted dump file
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
NEX-5177 backport illumos 6345 remove xhat support
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
6345 remove xhat support
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Approved by: Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
NEX-5164 backport illumos 6514 AS_* lock macros simplification
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
6514 AS_* lock macros simplification
Reviewed by: Piotr Jasiukajtis <estibi@me.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Albert Lee <trisk@omniti.com>
Approved by: Dan McDonald <danmcd@omniti.com>
re #13613 rb4516 Tunables need the volatile keyword

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
          +++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
↓ open down ↓ 14 lines elided ↑ open up ↑
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   */
  24   24  /*
  25      - * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
       25 + * Copyright 2018 Nexenta Systems, Inc.  All rights reserved.
  26   26   * Copyright 2016 Gary Mills
  27   27   */
  28   28  
  29   29  /*
  30   30   * VM - Hardware Address Translation management for Spitfire MMU.
  31   31   *
  32   32   * This file implements the machine specific hardware translation
  33   33   * needed by the VM system.  The machine independent interface is
  34   34   * described in <vm/hat.h> while the machine dependent interface
  35   35   * and data structures are described in <vm/hat_sfmmu.h>.
↓ open down ↓ 617 lines elided ↑ open up ↑
 653  653  #define TSB_RSS_FACTOR          (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
 654  654  #define SFMMU_RSS_TSBSIZE(tsbszc)       (tsb_rss_factor << tsbszc)
 655  655  #define SELECT_TSB_SIZECODE(pgcnt) ( \
 656  656          (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
 657  657          default_tsb_size)
 658  658  #define TSB_OK_SHRINK() \
 659  659          (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
 660  660  #define TSB_OK_GROW()   \
 661  661          (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
 662  662  
 663      -int     enable_tsb_rss_sizing = 1;
 664      -int     tsb_rss_factor  = (int)TSB_RSS_FACTOR;
      663 +volatile int    enable_tsb_rss_sizing = 1;
      664 +volatile int    tsb_rss_factor = (int)TSB_RSS_FACTOR;
 665  665  
 666  666  /* which TSB size code to use for new address spaces or if rss sizing off */
 667      -int default_tsb_size = TSB_8K_SZCODE;
      667 +volatile int default_tsb_size = TSB_8K_SZCODE;
 668  668  
 669  669  static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
 670      -uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
      670 +volatile uint64_t tsb_alloc_hiwater_factor;     /* tsb_alloc_hiwater =  */
      671 +                                                /*      physmem / this  */
 671  672  #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT        32
 672  673  
 673  674  #ifdef DEBUG
 674  675  static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */
 675  676  static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */
 676  677  static int tsb_alloc_mtbf = 0;  /* fail allocation every n attempts */
 677  678  static int tsb_alloc_fail_mtbf = 0;
 678  679  static int tsb_alloc_count = 0;
 679  680  #endif /* DEBUG */
 680  681  
↓ open down ↓ 5499 lines elided ↑ open up ↑
6180 6181           * and no longer referenced.  So no need to decrement ttecnt
6181 6182           * in the region structure here.
6182 6183           */
6183 6184          if (ttecnt > 0 && sfmmup != NULL) {
6184 6185                  atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
6185 6186          }
6186 6187          return (addr);
6187 6188  }
6188 6189  
6189 6190  /*
6190      - * Invalidate a virtual address range for the local CPU.
6191      - * For best performance ensure that the va range is completely
6192      - * mapped, otherwise the entire TLB will be flushed.
     6191 + * Flush the TLB for the local CPU
     6192 + * Invoked from a slave CPU during panic() dumps.
6193 6193   */
6194 6194  void
6195      -hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size)
     6195 +hat_flush(void)
6196 6196  {
6197      -        ssize_t sz;
6198      -        caddr_t endva = va + size;
6199      -
6200      -        while (va < endva) {
6201      -                sz = hat_getpagesize(sfmmup, va);
6202      -                if (sz < 0) {
6203      -                        vtag_flushall();
6204      -                        break;
6205      -                }
6206      -                vtag_flushpage(va, (uint64_t)sfmmup);
6207      -                va += sz;
6208      -        }
     6197 +        vtag_flushall();
6209 6198  }
6210 6199  
6211 6200  /*
6212 6201   * Synchronize all the mappings in the range [addr..addr+len).
6213 6202   * Can be called with clearflag having two states:
6214 6203   * HAT_SYNC_DONTZERO means just return the rm stats
6215 6204   * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
6216 6205   */
6217 6206  void
6218 6207  hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
↓ open down ↓ 9436 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX