Print this page
Add more statistics to the SIOCIPFCFWCFG ioctl, and add a new SIOCIPFCFWNEWSZ ioctl to change the ring-buffer size.


  80 
  81 /*
  82  * Because ipf's test tools in $SRC/cmd insert all of these files, we need to
  83  * stub out what we can vs. drag in even more headers and who knows what else.
  84  */
  85 #ifdef _KERNEL
  86 
  87 /*
  88  * CFW event ring buffer.  Remember, this is for ALL ZONES because only a
  89  * global-zone event-reader will be consuming these.  In other words, it's
  90  * not something to instantiate per-netstack.
  91  */
  92 
  93 /*
  94  * We may want to get more sophisticated and performant (e.g. per-processor),
  95  * but for now keep the ring buffer simple and stupid.
  96  */
  97 
  98 /* Must be a power of 2, to be bitmaskable, and must be countable by a uint_t */
  99 
 100 #define IPF_CFW_RING_BUFS       1024
 101 #define IPF_CFW_RING_MASK (IPF_CFW_RING_BUFS - 1)

 102 
 103 /* Assume C's init-to-zero is sufficient for these types... */
 104 static kmutex_t cfw_ringlock;
 105 static kcondvar_t cfw_ringcv;
 106 
 107 static cfwev_t cfw_evring[IPF_CFW_RING_BUFS];



 108 /* If these are equal, we're either empty or full. */
 109 static uint_t cfw_ringstart, cfw_ringend;
 110 static boolean_t cfw_ringfull;  /* Tell the difference here! */
 111 static uint64_t cfw_evreports;
 112 static uint64_t cfw_evdrops;
 113 
 114 /*
 115  * Place an event in the CFW event ring buffer.
 116  *
 117  * For now, be simple and drop the oldest event if we overflow. We may wish to
 118  * selectively drop older events based on type in the future.
 119  */
 120 static void
 121 ipf_cfwev_report(cfwev_t *event)
 122 {
 123         mutex_enter(&cfw_ringlock);
 124         if (cfw_ringfull) {
 125                 cfw_ringstart++;
 126                 cfw_ringstart &= IPF_CFW_RING_MASK;
 127                 cfw_ringend++;
 128                 cfw_ringend &= IPF_CFW_RING_MASK;
 129                 DTRACE_PROBE(ipf__cfw__evdrop);
 130                 cfw_evdrops++;
 131                 cfw_evring[cfw_ringend] = *event;
 132         } else {
 133                 cfw_evring[cfw_ringend] = *event;
 134                 cfw_ringend++;
 135                 cfw_ringend &= IPF_CFW_RING_MASK;
 136                 cfw_ringfull = (cfw_ringend == cfw_ringstart);
 137         }
 138         cfw_evreports++;
 139         cv_broadcast(&cfw_ringcv);
 140         mutex_exit(&cfw_ringlock);
 141 }
 142 
 143 #if 0
 144 /*
 145  * Simple event consumer which copies one event from the ring buffer into
 146  * what's provided.  In the future, maybe lock-then-callback, even with a
 147  * request for multiple events?
 148  *
 149  * If there are no events, either cv_wait() or return B_FALSE, depending on
 150  * "block".
 151  */
 152 boolean_t
 153 ipf_cfwev_consume(cfwev_t *event, boolean_t block)
 154 {
 155         mutex_enter(&cfw_ringlock);
 156 
 157         /*
 158          * Return B_FALSE if non-block and no data, OR if we receive a signal.
 159          */
 160         while ((cfw_ringstart == cfw_ringend) && !cfw_ringfull) {
 161                 if (!block || !cv_wait_sig(&cfw_ringcv, &cfw_ringlock)) {
 162                         mutex_exit(&cfw_ringlock);
 163                         return (B_FALSE);
 164                 }
 165         }
 166 
 167         *event = cfw_evring[cfw_ringstart];
 168         cfw_ringstart++;
 169         cfw_ringstart &= IPF_CFW_RING_MASK;
 170         cfw_ringfull = B_FALSE;
 171         mutex_exit(&cfw_ringlock);
 172         return (B_TRUE);
 173 }
 174 #endif
 175 
 176 /*
 177  * More sophisticated access to multiple CFW events that can allow copying
 178  * straight from the ring buffer up to userland.  Requires a callback (which
 179  * could call uiomove() directly, OR to a local still-in-kernel buffer) that
 180  * must do the data copying-out.
 181  *
 182  * Callback function is of the form:
 183  *
 184  *      uint_t cfw_many_cb(cfwev_t *evptr, int num_avail, void *cbarg);
 185  *
 186  * The function must return how many events got consumed, which MUST be <= the
 187  * number available.  The function must ALSO UNDERSTAND that cfw_ringlock is


 197  * If you wish to attempt to coalesce reads (to reduce the likelihood of one
 198  * event at a time during high load) change the number of tries below to
 199  * something not 0. Early experiments set this to 10.
 200  *
 201  * The wait between tries is in usecs in cfw_timeout_wait. The pessimal
 202  * case for this is a timeout_wait-spaced trickle of one event at a time.
 203  */
 204 int cfw_timeout_tries = 0;
 205 int cfw_timeout_wait = 10000;   /* 10ms wait. */
 206 
 207 uint_t
 208 ipf_cfwev_consume_many(uint_t num_requested, boolean_t block,
 209     cfwmanycb_t cfw_many_cb, void *cbarg)
 210 {
 211         uint_t consumed = 0, cb_consumed, contig_size;
 212         int timeout_tries = cfw_timeout_tries;
 213 
 214         mutex_enter(&cfw_ringlock);
 215 
 216         /* Silly reality checks */
 217         ASSERT3U(cfw_ringstart, <, IPF_CFW_RING_BUFS);
 218         ASSERT3U(cfw_ringend, <, IPF_CFW_RING_BUFS);
 219 
 220         /*
 221          * Can goto here again if caller wants blocking. NOTE that
 222          * num_requested may have been decremented and consumed may have been
 223          * incremented if we arrive here via a goto after a cv_wait.
 224          */
 225 from_the_top:
 226         if (cfw_ringstart > cfw_ringend || cfw_ringfull)
 227                 contig_size = IPF_CFW_RING_BUFS - cfw_ringstart;
 228         else if (cfw_ringstart < cfw_ringend)
 229                 contig_size = cfw_ringend - cfw_ringstart;
 230         else if (block && cv_wait_sig(&cfw_ringcv, &cfw_ringlock)) {
 231                 /* Maybe something to consume now, try again. */
 232                 goto from_the_top;
 233         } else {
 234                 /* Nothing (more) to consume, return! */
 235                 goto bail;
 236         }
 237 
 238         ASSERT(contig_size + cfw_ringstart == cfw_ringend ||
 239             contig_size + cfw_ringstart == IPF_CFW_RING_BUFS);
 240 
 241         if (num_requested < contig_size)
 242                 contig_size = num_requested;
 243 
 244         cb_consumed = cfw_many_cb(&(cfw_evring[cfw_ringstart]), contig_size,
 245             cbarg);
 246         ASSERT(cb_consumed <= contig_size);
 247         cfw_ringstart += cb_consumed;
 248         consumed += cb_consumed;
 249         cfw_ringfull = (cfw_ringfull && cb_consumed == 0);
 250         if (cb_consumed < contig_size) {
 251                 /* Caller clearly had a problem. Reality check and bail. */
 252                 ASSERT((cfw_ringstart & IPF_CFW_RING_MASK) == cfw_ringstart);
 253                 goto bail;
 254         }
 255         ASSERT(cb_consumed == contig_size);
 256         cfw_ringstart &= IPF_CFW_RING_MASK; /* In case of wraparound. */
 257         num_requested -= contig_size;
 258 
 259         if (num_requested > 0 && cfw_ringstart != cfw_ringend) {
 260                 /* We must have wrapped around the end of the buffer! */
 261                 ASSERT(cfw_ringstart == 0);
 262                 ASSERT(!cfw_ringfull);
 263                 contig_size = cfw_ringend;
 264                 if (num_requested < contig_size)
 265                         contig_size = num_requested;
 266                 cb_consumed = cfw_many_cb(&(cfw_evring[cfw_ringstart]),
 267                     contig_size, cbarg);
 268                 cfw_ringstart += cb_consumed;
 269                 consumed += cb_consumed;
 270                 if (cb_consumed < contig_size) {
 271                         /*
 272                          * Caller clearly had a problem. Reality check and
 273                          * bail.
 274                          */
 275                         ASSERT(cfw_ringend > cfw_ringstart);
 276                         goto bail;
 277                 }
 278                 ASSERT(cb_consumed == contig_size);
 279                 num_requested -= contig_size;
 280         }
 281 
 282         ASSERT(consumed > 0);
 283 
 284         if (num_requested > 0 && block && timeout_tries > 0) {
 285                 clock_t delta = drv_usectohz(cfw_timeout_wait);
 286 


 482 
 483 /* Returning 0 means error indication. */
 484 static uint_t
 485 cfwlog_read_manycb(cfwev_t *evptr, uint_t num_avail, void *cbarg)
 486 {
 487         uio_error_t *ue = (uio_error_t *)cbarg;
 488 
 489         ASSERT(MUTEX_HELD(&cfw_ringlock));
 490 
 491         if (ue->ue_error != 0)
 492                 return (0);
 493 
 494         ue->ue_error = uiomove((caddr_t)evptr, num_avail * sizeof (*evptr),
 495             UIO_READ, ue->ue_uio);
 496         if (ue->ue_error != 0)
 497                 return (0);
 498 
 499         return (num_avail);
 500 }
 501 













































 502 /* ARGSUSED */
 503 int
 504 ipf_cfwlog_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *cp,
 505     int *rp)
 506 {
 507         ipfcfwcfg_t cfginfo;
 508         int error;
 509 
 510         if (cmd != SIOCIPFCFWCFG)
 511                 return (EIO);
 512 
 513         if (crgetzoneid(cp) != GLOBAL_ZONEID)
 514                 return (EACCES);
 515 
 516 #ifdef notyet
 517         error = COPYIN((caddr_t)data, (caddr_t)&cfginfo, sizeof (cfginfo));
 518         if (error != 0)
 519                 return (EFAULT);
 520         /* TODO: Resize ring buffer based on cfginfo.ipfcfwc_evringsize. */
 521 #endif
 522 
 523         cfginfo.ipfcfwc_maxevsize = sizeof (cfwev_t);
 524         cfginfo.ipfcfwc_evringsize = IPF_CFW_RING_BUFS;




 525 
















 526         error = COPYOUT((caddr_t)&cfginfo, (caddr_t)data, sizeof (cfginfo));
 527         if (error != 0)
 528                 return (EFAULT);
 529 
 530         return (0);
 531 }
 532 
 533 /* ARGSUSED */
 534 int
 535 ipf_cfwlog_read(dev_t dev, struct uio *uio, cred_t *cp)
 536 {
 537         uint_t requested, consumed;
 538         uio_error_t ue = {uio, 0};
 539         boolean_t block;
 540 
 541         if (uio->uio_resid == 0)
 542                 return (0);
 543         if (uio->uio_resid < sizeof (cfwev_t))
 544                 return (EINVAL);
 545         /* XXX KEBE ASKS: Check for resid being too big?!? */


 560                 /* No data available. */
 561                 ue.ue_error = EWOULDBLOCK;
 562         } else if (ue.ue_error != 0 || (block && consumed == 0)) {
 563                 /* We had a problem... */
 564                 if (ue.ue_error == 0) {
 565                         /* Cover case of cv_wait_sig() receiving a signal. */
 566                         ue.ue_error = EINTR;
 567                 }
 568                 mutex_enter(&cfw_ringlock);
 569                 DTRACE_PROBE1(ipf__cfw__uiodiscard, int, consumed);
 570                 cfw_evdrops += consumed;
 571                 mutex_exit(&cfw_ringlock);
 572         }
 573         return (ue.ue_error);
 574 }
 575 
 576 #else
 577 
 578 /* Blank stubs to satisfy userland's test compilations. */
 579 






/* No-op outside _KERNEL; exists only so userland test tools compile/link. */
 580 void
 581 ipf_log_cfwlog(struct ipstate *a, uint_t b, ipf_stack_t *c)
 582 {
 583 }
 584 
/* No-op outside _KERNEL; exists only so userland test tools compile/link. */
 585 void
 586 ipf_block_cfwlog(frentry_t *a, fr_info_t *b, ipf_stack_t *c)
 587 {
 588 }
 589 
 590 #endif  /* _KERNEL */


  80 
  81 /*
  82  * Because ipf's test tools in $SRC/cmd insert all of these files, we need to
  83  * stub out what we can vs. drag in even more headers and who knows what else.
  84  */
  85 #ifdef _KERNEL
  86 
  87 /*
  88  * CFW event ring buffer.  Remember, this is for ALL ZONES because only a
  89  * global-zone event-reader will be consuming these.  In other words, it's
  90  * not something to instantiate per-netstack.
  91  */
  92 
  93 /*
  94  * We may want to get more sophisticated and performant (e.g. per-processor),
  95  * but for now keep the ring buffer simple and stupid.
  96  */
  97 
  98 /* Must be a power of 2, to be bitmaskable, and must be countable by a uint_t */
  99 
 100 #define IPF_CFW_DEFAULT_RING_BUFS       1024
 101 #define IPF_CFW_MIN_RING_BUFS           8
 102 #define IPF_CFW_MAX_RING_BUFS           (1U << 31U)
 103 
 104 /* Assume C's init-to-zero is sufficient for these types... */
 105 static kmutex_t cfw_ringlock;
 106 static kcondvar_t cfw_ringcv;
 107 
 108 static cfwev_t *cfw_ring;       /* NULL by default. */
 109 static uint32_t cfw_ringsize;   /* 0 by default, number of array elements. */
 110 static uint32_t cfw_ringmask;   /* 0 by default. */
 111 
 112 /* If these are equal, we're either empty or full. */
 113 static uint_t cfw_ringstart, cfw_ringend;
 114 static boolean_t cfw_ringfull;  /* Tell the difference here! */
 115 static uint64_t cfw_evreports;
 116 static uint64_t cfw_evdrops;
 117 
 118 /*
 119  * Place an event in the CFW event ring buffer.
 120  *
 121  * For now, be simple and drop the oldest event if we overflow. We may wish to
 122  * selectively drop older events based on type in the future.
 123  */
 124 static void
 125 ipf_cfwev_report(cfwev_t *event)
 126 {
 127         mutex_enter(&cfw_ringlock);
 128         if (cfw_ringfull) {
 129                 cfw_ringstart++;
 130                 cfw_ringstart &= cfw_ringmask;
 131                 cfw_ringend++;
 132                 cfw_ringend &= cfw_ringmask;
 133                 DTRACE_PROBE(ipf__cfw__evdrop);
 134                 cfw_evdrops++;
 135                 cfw_ring[cfw_ringend] = *event;
 136         } else {
 137                 cfw_ring[cfw_ringend] = *event;
 138                 cfw_ringend++;
 139                 cfw_ringend &= cfw_ringmask;
 140                 cfw_ringfull = (cfw_ringend == cfw_ringstart);
 141         }
 142         cfw_evreports++;
 143         cv_broadcast(&cfw_ringcv);
 144         mutex_exit(&cfw_ringlock);
 145 }
 146 
 147 #if 0
 148 /*
 149  * Simple event consumer which copies one event from the ring buffer into
 150  * what's provided.  In the future, maybe lock-then-callback, even with a
 151  * request for multiple events?
 152  *
 153  * If there are no events, either cv_wait() or return B_FALSE, depending on
 154  * "block".
 155  */
 156 boolean_t
 157 ipf_cfwev_consume(cfwev_t *event, boolean_t block)
 158 {
 159         mutex_enter(&cfw_ringlock);
 160 
 161         /*
 162          * Return B_FALSE if non-block and no data, OR if we receive a signal.
 163          */
 164         while ((cfw_ringstart == cfw_ringend) && !cfw_ringfull) {
 165                 if (!block || !cv_wait_sig(&cfw_ringcv, &cfw_ringlock)) {
 166                         mutex_exit(&cfw_ringlock);
 167                         return (B_FALSE);
 168                 }
 169         }
 170 
 171         *event = cfw_ring[cfw_ringstart];
 172         cfw_ringstart++;
 173         cfw_ringstart &= IPF_CFW_RING_MASK;
 174         cfw_ringfull = B_FALSE;
 175         mutex_exit(&cfw_ringlock);
 176         return (B_TRUE);
 177 }
 178 #endif
 179 
 180 /*
 181  * More sophisticated access to multiple CFW events that can allow copying
 182  * straight from the ring buffer up to userland.  Requires a callback (which
 183  * could call uiomove() directly, OR to a local still-in-kernel buffer) that
 184  * must do the data copying-out.
 185  *
 186  * Callback function is of the form:
 187  *
 188  *      uint_t cfw_many_cb(cfwev_t *evptr, int num_avail, void *cbarg);
 189  *
 190  * The function must return how many events got consumed, which MUST be <= the
 191  * number available.  The function must ALSO UNDERSTAND that cfw_ringlock is


 201  * If you wish to attempt to coalesce reads (to reduce the likelihood of one
 202  * event at a time during high load) change the number of tries below to
 203  * something not 0. Early experiments set this to 10.
 204  *
 205  * The wait between tries is in usecs in cfw_timeout_wait. The pessimal
 206  * case for this is a timeout_wait-spaced trickle of one event at a time.
 207  */
 208 int cfw_timeout_tries = 0;
 209 int cfw_timeout_wait = 10000;   /* 10ms wait. */
 210 
 211 uint_t
 212 ipf_cfwev_consume_many(uint_t num_requested, boolean_t block,
 213     cfwmanycb_t cfw_many_cb, void *cbarg)
 214 {
 215         uint_t consumed = 0, cb_consumed, contig_size;
 216         int timeout_tries = cfw_timeout_tries;
 217 
 218         mutex_enter(&cfw_ringlock);
 219 
 220         /* Silly reality checks */
 221         ASSERT3U(cfw_ringstart, <, cfw_ringsize);
 222         ASSERT3U(cfw_ringend, <, cfw_ringsize);
 223 
 224         /*
 225          * Can goto here again if caller wants blocking. NOTE that
 226          * num_requested may have been decremented and consumed may have been
 227          * incremented if we arrive here via a goto after a cv_wait.
 228          */
 229 from_the_top:
 230         if (cfw_ringstart > cfw_ringend || cfw_ringfull)
 231                 contig_size = cfw_ringsize - cfw_ringstart;
 232         else if (cfw_ringstart < cfw_ringend)
 233                 contig_size = cfw_ringend - cfw_ringstart;
 234         else if (block && cv_wait_sig(&cfw_ringcv, &cfw_ringlock)) {
 235                 /* Maybe something to consume now, try again. */
 236                 goto from_the_top;
 237         } else {
 238                 /* Nothing (more) to consume, return! */
 239                 goto bail;
 240         }
 241 
 242         ASSERT(contig_size + cfw_ringstart == cfw_ringend ||
 243             contig_size + cfw_ringstart == cfw_ringsize);
 244 
 245         if (num_requested < contig_size)
 246                 contig_size = num_requested;
 247 
 248         cb_consumed = cfw_many_cb(&(cfw_ring[cfw_ringstart]), contig_size,
 249             cbarg);
 250         ASSERT(cb_consumed <= contig_size);
 251         cfw_ringstart += cb_consumed;
 252         consumed += cb_consumed;
 253         cfw_ringfull = (cfw_ringfull && cb_consumed == 0);
 254         if (cb_consumed < contig_size) {
 255                 /* Caller clearly had a problem. Reality check and bail. */
 256                 ASSERT((cfw_ringstart & cfw_ringmask) == cfw_ringstart);
 257                 goto bail;
 258         }
 259         ASSERT(cb_consumed == contig_size);
 260         cfw_ringstart &= cfw_ringmask;      /* In case of wraparound. */
 261         num_requested -= contig_size;
 262 
 263         if (num_requested > 0 && cfw_ringstart != cfw_ringend) {
 264                 /* We must have wrapped around the end of the buffer! */
 265                 ASSERT(cfw_ringstart == 0);
 266                 ASSERT(!cfw_ringfull);
 267                 contig_size = cfw_ringend;
 268                 if (num_requested < contig_size)
 269                         contig_size = num_requested;
 270                 cb_consumed = cfw_many_cb(&(cfw_ring[cfw_ringstart]),
 271                     contig_size, cbarg);
 272                 cfw_ringstart += cb_consumed;
 273                 consumed += cb_consumed;
 274                 if (cb_consumed < contig_size) {
 275                         /*
 276                          * Caller clearly had a problem. Reality check and
 277                          * bail.
 278                          */
 279                         ASSERT(cfw_ringend > cfw_ringstart);
 280                         goto bail;
 281                 }
 282                 ASSERT(cb_consumed == contig_size);
 283                 num_requested -= contig_size;
 284         }
 285 
 286         ASSERT(consumed > 0);
 287 
 288         if (num_requested > 0 && block && timeout_tries > 0) {
 289                 clock_t delta = drv_usectohz(cfw_timeout_wait);
 290 


 486 
 487 /* Returning 0 means error indication. */
 488 static uint_t
 489 cfwlog_read_manycb(cfwev_t *evptr, uint_t num_avail, void *cbarg)
 490 {
 491         uio_error_t *ue = (uio_error_t *)cbarg;
 492 
 493         ASSERT(MUTEX_HELD(&cfw_ringlock));
 494 
 495         if (ue->ue_error != 0)
 496                 return (0);
 497 
 498         ue->ue_error = uiomove((caddr_t)evptr, num_avail * sizeof (*evptr),
 499             UIO_READ, ue->ue_uio);
 500         if (ue->ue_error != 0)
 501                 return (0);
 502 
 503         return (num_avail);
 504 }
 505 
/*
 * Allocate, resize, or destroy the global CFW event ring.
 *
 * newsize is either a power-of-2 element count, or one of the sentinels
 * IPF_CFW_RING_ALLOCATE (first-time allocation at the default size) or
 * IPF_CFW_RING_DESTROY (free the ring and reset indices).  Callers passing a
 * real size must hold cfw_ringlock; the sentinel paths assert they don't
 * need it.  Events still in the ring at resize/destroy time are counted as
 * drops.  Returns 0, or EBUSY if ALLOCATE finds a ring already in place.
 */
 506 int
 507 ipf_cfw_ring_resize(uint32_t newsize)
 508 {
 509         ASSERT(MUTEX_HELD(&cfw_ringlock) || newsize == IPF_CFW_RING_ALLOCATE ||
 510             newsize == IPF_CFW_RING_DESTROY);
 511 
 512         if (newsize == IPF_CFW_RING_ALLOCATE) {
 513                 if (cfw_ring != NULL)
 514                         return (EBUSY);
 515                 newsize = IPF_CFW_DEFAULT_RING_BUFS;
 516                 /* Fall through to allocating a new ring buffer. */
 517         } else {
 518                 /* We may be called during error cleanup, so be liberal here. */
 519                 if (cfw_ring == NULL && newsize == IPF_CFW_RING_DESTROY)
 520                         return (0);
/*
 * NOTE(review): a resize request (non-DESTROY) arriving while cfw_ring is
 * still NULL reaches this kmem_free(NULL, 0) — confirm that cannot happen,
 * or that it is harmless on this platform.
 */
 521                 kmem_free(cfw_ring, cfw_ringsize * sizeof (cfwev_t));
 522                 cfw_ring = NULL;
/* Account every event still in the ring as dropped. */
 523                 if (cfw_ringfull) {
 524                         cfw_evdrops += cfw_ringsize;
 525                 } else if (cfw_ringstart > cfw_ringend) {
/* Occupied region wraps: [start, size) plus [0, end). */
 526                         cfw_evdrops += cfw_ringend +
 527                             (cfw_ringsize - cfw_ringstart);
 528                 } else {
 529                         cfw_evdrops += cfw_ringend - cfw_ringstart;
 530                 }
 531                 cfw_ringsize = cfw_ringmask = cfw_ringstart = cfw_ringend = 0;
 532                 cfw_ringfull = B_FALSE;
 533 
 534                 if (newsize == IPF_CFW_RING_DESTROY)
 535                         return (0);
 536                 /*
 537                  * Keep the reports & drops around because if we're just
 538                  * resizing, we need to know what we lost.
 539                  */
 540         }
 541 
/* The ioctl path validated this, but guard all callers. */
 542         ASSERT(ISP2(newsize));
 543         cfw_ring = kmem_alloc(newsize * sizeof (cfwev_t), KM_SLEEP);
 544         /* KM_SLEEP means we always succeed. */
 545         cfw_ringsize = newsize;
 546         cfw_ringmask = cfw_ringsize - 1;
 547 
 548         return (0);
 549 }
 550 
 551 /* ARGSUSED */
 552 int
 553 ipf_cfwlog_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *cp,
 554     int *rp)
 555 {
 556         ipfcfwcfg_t cfginfo;
 557         int error;
 558 
 559         if (cmd != SIOCIPFCFWCFG && cmd != SIOCIPFCFWNEWSZ)
 560                 return (EIO);
 561 
 562         if (crgetzoneid(cp) != GLOBAL_ZONEID)
 563                 return (EACCES);
 564 

 565         error = COPYIN((caddr_t)data, (caddr_t)&cfginfo, sizeof (cfginfo));
 566         if (error != 0)
 567                 return (EFAULT);


 568 
 569         cfginfo.ipfcfwc_maxevsize = sizeof (cfwev_t);
 570         mutex_enter(&cfw_ringlock);
 571         cfginfo.ipfcfwc_evreports = cfw_evreports;
 572         cfginfo.ipfcfwc_evdrops = cfw_evdrops;
 573         if (cmd == SIOCIPFCFWNEWSZ) {
 574                 uint32_t newsize = cfginfo.ipfcfwc_evringsize;
 575 
 576                 /* Do ioctl parameter checking here, then call the resizer. */
 577                 if (newsize < IPF_CFW_MIN_RING_BUFS ||
 578                     newsize > IPF_CFW_MAX_RING_BUFS || !ISP2(newsize)) {
 579                         error = EINVAL;
 580                 } else {
 581                         error = ipf_cfw_ring_resize(cfginfo.ipfcfwc_evringsize);
 582                 }
 583         } else {
 584                 error = 0;
 585         }
 586         cfginfo.ipfcfwc_evringsize = cfw_ringsize;
 587         mutex_exit(&cfw_ringlock);
 588 
 589         if (error != 0)
 590                 return (error);
 591 
 592         error = COPYOUT((caddr_t)&cfginfo, (caddr_t)data, sizeof (cfginfo));
 593         if (error != 0)
 594                 return (EFAULT);
 595 
 596         return (0);
 597 }
 598 
 599 /* ARGSUSED */
 600 int
 601 ipf_cfwlog_read(dev_t dev, struct uio *uio, cred_t *cp)
 602 {
 603         uint_t requested, consumed;
 604         uio_error_t ue = {uio, 0};
 605         boolean_t block;
 606 
 607         if (uio->uio_resid == 0)
 608                 return (0);
 609         if (uio->uio_resid < sizeof (cfwev_t))
 610                 return (EINVAL);
 611         /* XXX KEBE ASKS: Check for resid being too big?!? */


 626                 /* No data available. */
 627                 ue.ue_error = EWOULDBLOCK;
 628         } else if (ue.ue_error != 0 || (block && consumed == 0)) {
 629                 /* We had a problem... */
 630                 if (ue.ue_error == 0) {
 631                         /* Cover case of cv_wait_sig() receiving a signal. */
 632                         ue.ue_error = EINTR;
 633                 }
 634                 mutex_enter(&cfw_ringlock);
 635                 DTRACE_PROBE1(ipf__cfw__uiodiscard, int, consumed);
 636                 cfw_evdrops += consumed;
 637                 mutex_exit(&cfw_ringlock);
 638         }
 639         return (ue.ue_error);
 640 }
 641 
 642 #else
 643 
 644 /* Blank stubs to satisfy userland's test compilations. */
 645 
 646 int
 647 ipf_cfw_ring_resize(uint32_t a)
 648 {
 649         return (0);
 650 }
 651 
/* No-op outside _KERNEL; exists only so userland test tools compile/link. */
 652 void
 653 ipf_log_cfwlog(struct ipstate *a, uint_t b, ipf_stack_t *c)
 654 {
 655 }
 656 
/* No-op outside _KERNEL; exists only so userland test tools compile/link. */
 657 void
 658 ipf_block_cfwlog(frentry_t *a, fr_info_t *b, ipf_stack_t *c)
 659 {
 660 }
 661 
 662 #endif  /* _KERNEL */