NEX-19598 HAT panic
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
NEX-16970 assertion failed: ht->ht_valid_cnt >= 0, file: ../../i86pc/vm/htable.c, line: 1204
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
NEX-18463 Parallel dump produces corrupted dump file
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
NEX-5164 backport illumos 6514 AS_* lock macros simplification
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
6514 AS_* lock macros simplification
Reviewed by: Piotr Jasiukajtis <estibi@me.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Albert Lee <trisk@omniti.com>
Approved by: Dan McDonald <danmcd@omniti.com>
*** 24,34 ****
  /*
   * Copyright (c) 2010, Intel Corporation.
   * All rights reserved.
   */
  /*
!  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
   * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
   */
  
  /*
   * VM - Hardware Address Translation management for i386 and amd64
--- 24,35 ----
  /*
   * Copyright (c) 2010, Intel Corporation.
   * All rights reserved.
   */
  /*
!  * Copyright 2019 Nexenta Systems, Inc.  All rights reserved.
!  * Copyright 2018 Joyent, Inc.  All rights reserved.
   * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
   */
  
  /*
   * VM - Hardware Address Translation management for i386 and amd64
*** 263,272 ****
--- 264,274 ----
          ASSERT(AS_WRITE_HELD(as));
          hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
          hat->hat_as = as;
          mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
          ASSERT(hat->hat_flags == 0);
+         hat->hat_unmaps = 0;
  
  #if defined(__xpv)
          /*
           * No VLP stuff on the hypervisor due to the 64-bit split top level
           * page tables.  On 32-bit it's not needed as the hypervisor takes
*** 399,409 ****
           * If the hat is currently a stealing victim, wait for the stealing
           * to finish.  Once we mark it as HAT_FREEING, htable_steal()
           * won't look at its pagetables anymore.
           */
          mutex_enter(&hat_list_lock);
!         while (hat->hat_flags & HAT_VICTIM)
                  cv_wait(&hat_list_cv, &hat_list_lock);
          hat->hat_flags |= HAT_FREEING;
          mutex_exit(&hat_list_lock);
  }
  
--- 401,411 ----
           * If the hat is currently a stealing victim, wait for the stealing
           * to finish.  Once we mark it as HAT_FREEING, htable_steal()
           * won't look at its pagetables anymore.
           */
          mutex_enter(&hat_list_lock);
!         while ((hat->hat_flags & HAT_VICTIM) || (hat->hat_unmaps > 0))
                  cv_wait(&hat_list_cv, &hat_list_lock);
          hat->hat_flags |= HAT_FREEING;
          mutex_exit(&hat_list_lock);
  }
  
*** 2460,2495 ****
                  handle_ranges(hat, cb, r_cnt, r);
          XPV_ALLOW_MIGRATE();
  }
  
  /*
!  * Invalidate a virtual address translation on a slave CPU during
!  * panic() dumps.
   */
  void
! hat_flush_range(hat_t *hat, caddr_t va, size_t size)
  {
-         ssize_t sz;
-         caddr_t endva = va + size;
- 
-         while (va < endva) {
-                 sz = hat_getpagesize(hat, va);
-                 if (sz < 0) {
  #ifdef __xpv
                          xen_flush_tlb();
  #else
                          flush_all_tlb_entries();
  #endif
-                         break;
-                 }
- #ifdef __xpv
-                 xen_flush_va(va);
- #else
-                 mmu_tlbflush_entry(va);
- #endif
-                 va += sz;
-         }
  }
  
  /*
   * synchronize mapping with software data structures
   *
--- 2462,2482 ----
                  handle_ranges(hat, cb, r_cnt, r);
          XPV_ALLOW_MIGRATE();
  }
  
  /*
!  * Flush the TLB for the local CPU
!  * Invoked from a slave CPU during panic() dumps.
   */
  void
! hat_flush(void)
  {
  #ifdef __xpv
                          xen_flush_tlb();
  #else
                          flush_all_tlb_entries();
  #endif
  }
  
  /*
   * synchronize mapping with software data structures
   *
*** 3319,3340 ****
  hat_page_getattr(struct page *pp, uint_t flag)
  {
          return (PP_GETRM(pp, flag));
  }
  
- 
  /*
   * common code used by hat_pageunload() and hment_steal()
   */
  hment_t *
  hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
  {
          x86pte_t old_pte;
          pfn_t pfn = pp->p_pagenum;
          hment_t *hm;
  
          /*
           * We need to acquire a hold on the htable in order to
           * do the invalidate. We know the htable must exist, since
           * unmap's don't release the htable until after removing any
           * hment. Having x86_hm_enter() keeps that from proceeding.
           */
--- 3306,3344 ----
  hat_page_getattr(struct page *pp, uint_t flag)
  {
          return (PP_GETRM(pp, flag));
  }
  
  /*
   * common code used by hat_pageunload() and hment_steal()
   */
  hment_t *
  hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
  {
          x86pte_t old_pte;
          pfn_t pfn = pp->p_pagenum;
          hment_t *hm;
+         hat_t *hat = ht->ht_hat;
  
          /*
+          * There is a race between this function and the freeing of a HAT
+          * whose owning process is exiting; process exit code ignores htable
+          * reference counts.
+          * If the HAT is already freeing (HAT_FREEING) no-op this function.
+          * Otherwise increment hat_unmaps to block the hat from being free'd
+          * until this function completes.
+          */
+         mutex_enter(&hat_list_lock);
+         if (hat->hat_flags & HAT_FREEING) {
+                 mutex_exit(&hat_list_lock);
+                 x86_hm_exit(pp);
+                 return (NULL);
+         }
+         ++(hat->hat_unmaps);
+         mutex_exit(&hat_list_lock);
+ 
+         /*
           * We need to acquire a hold on the htable in order to
           * do the invalidate. We know the htable must exist, since
           * unmap's don't release the htable until after removing any
           * hment. Having x86_hm_enter() keeps that from proceeding.
           */
*** 3367,3381 ****
           * Remove the mapping list entry for this page.
           */
          hm = hment_remove(pp, ht, entry);
  
          /*
!          * drop the mapping list lock so that we might free the
!          * hment and htable.
           */
          x86_hm_exit(pp);
          htable_release(ht);
          return (hm);
  }
  
  extern int      vpm_enable;
  /*
--- 3371,3389 ----
           * Remove the mapping list entry for this page.
           */
          hm = hment_remove(pp, ht, entry);
  
          /*
!          * drop the mapping list lock so that we might free the hment and htable
           */
          x86_hm_exit(pp);
          htable_release(ht);
+ 
+         mutex_enter(&hat_list_lock);
+         --(hat->hat_unmaps);
+         cv_broadcast(&hat_list_cv);
+         mutex_exit(&hat_list_lock);
          return (hm);
  }
  
  extern int      vpm_enable;
  /*