11586 cp = cpu_list;
11587
11588 do {
/*
 * Walk the circular CPU list; when a single CPU was requested
 * (cpu != DTRACE_CPUALL), skip every other CPU.
 */
11589 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11590 continue;
11591
11592 buf = &bufs[cp->cpu_id];
11593
11594 /*
11595 * If there is already a buffer allocated for this CPU, it
11596 * is only possible that this is a DR event. In this case,
11597 * the buffer size must match our specified size.
11598 */
11599 if (buf->dtb_tomax != NULL) {
11600 ASSERT(buf->dtb_size == size);
11601 continue;
11602 }
11603
11604 ASSERT(buf->dtb_xamot == NULL);
11605
/*
 * Allocate the active buffer without sleeping; on failure, jump
 * to the err path below to undo any partial allocations.
 */
11606 if ((buf->dtb_tomax = kmem_zalloc(size,
11607 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11608 goto err;
11609
11610 buf->dtb_size = size;
11611 buf->dtb_flags = flags;
11612 buf->dtb_offset = 0;
11613 buf->dtb_drops = 0;
11614
/* Non-switching buffers need no second (xamot) buffer. */
11615 if (flags & DTRACEBUF_NOSWITCH)
11616 continue;
11617
11618 if ((buf->dtb_xamot = kmem_zalloc(size,
11619 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11620 goto err;
11621 } while ((cp = cp->cpu_next) != cpu_list);
11622
11623 return (0);
11624
11625 err:
/*
 * An allocation failed: re-walk the CPU list, freeing whatever was
 * allocated so far and tallying desired vs. allocated buffer counts.
 */
11626 cp = cpu_list;
11627
11628 do {
11629 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11630 continue;
11631
11632 buf = &bufs[cp->cpu_id];
/* Each CPU wants both a tomax and an xamot buffer. */
11633 desired += 2;
11634
11635 if (buf->dtb_xamot != NULL) {
11636 ASSERT(buf->dtb_tomax != NULL);
11637 ASSERT(buf->dtb_size == size);
11638 kmem_free(buf->dtb_xamot, size);
11639 allocated++;
13547 {
13548 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
13549 void *base;
13550 uintptr_t limit;
13551 dtrace_dynvar_t *dvar, *next, *start;
13552 int i;
13553
13554 ASSERT(MUTEX_HELD(&dtrace_lock));
13555 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
13556
/* Reset the entire dynamic-variable state before (re)initializing it. */
13557 bzero(dstate, sizeof (dtrace_dstate_t));
13558
/* A chunksize of zero means "use the default chunk size". */
13559 if ((dstate->dtds_chunksize = chunksize) == 0)
13560 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
13561
13562 VERIFY(dstate->dtds_chunksize < LONG_MAX);
13563
/* Guarantee room for at least one chunk plus one hash bucket. */
13564 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
13565 size = min;
13566
13567 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL)
13568 return (ENOMEM);
13569
13570 dstate->dtds_size = size;
13571 dstate->dtds_base = base;
/* Per-CPU state comes from a cache and may sleep; zeroed explicitly. */
13572 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
13573 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
13574
13575 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
13576
/* If hashsize is odd (and not 1), round it down to an even value. */
13577 if (hashsize != 1 && (hashsize & 1))
13578 hashsize--;
13579
13580 dstate->dtds_hashsize = hashsize;
/* The hash table lives at the start of the allocated region. */
13581 dstate->dtds_hash = dstate->dtds_base;
13582
13583 /*
13584 * Set all of our hash buckets to point to the single sink, and (if
13585 * it hasn't already been set), set the sink's hash value to be the
13586 * sink sentinel value. The sink is needed for dynamic variable
13587 * lookups to know that they have iterated over an entire, valid hash
14074 rval = EACCES;
14075 goto out;
14076 }
14077
14078 dtrace_state_prereserve(state);
14079
14080 /*
14081 * Now we want to try to allocate our speculations.
14082 * We do not automatically resize the number of speculations; if
14083 * this fails, we will fail the operation.
14084 */
14085 nspec = opt[DTRACEOPT_NSPEC];
14086 ASSERT(nspec != DTRACEOPT_UNSET);
14087
/* dts_nspeculations is an int; reject counts that will not fit. */
14088 if (nspec > INT_MAX) {
14089 rval = ENOMEM;
14090 goto out;
14091 }
14092
14093 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t),
14094 KM_NOSLEEP | KM_NORMALPRI);
14095
14096 if (spec == NULL) {
14097 rval = ENOMEM;
14098 goto out;
14099 }
14100
14101 state->dts_speculations = spec;
14102 state->dts_nspeculations = (int)nspec;
14103
/* Allocate a buffer for each speculation; err undoes partial work. */
14104 for (i = 0; i < nspec; i++) {
14105 if ((buf = kmem_zalloc(bufsize,
14106 KM_NOSLEEP | KM_NORMALPRI)) == NULL) {
14107 rval = ENOMEM;
14108 goto err;
14109 }
14110
14111 spec[i].dtsp_buffer = buf;
14112 }
14113
/* Optionally adopt anonymous tracing state, if one exists and we have no ECBs yet. */
14114 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
14115 if (dtrace_anon.dta_state == NULL) {
14116 rval = ENOENT;
14117 goto out;
14118 }
14119
14120 if (state->dts_necbs != 0) {
14121 rval = EALREADY;
14122 goto out;
14123 }
14124
14125 state->dts_anon = dtrace_anon_grab();
14126 ASSERT(state->dts_anon != NULL);
|
11586 cp = cpu_list;
11587
11588 do {
/*
 * Walk the circular CPU list; when a single CPU was requested
 * (cpu != DTRACE_CPUALL), skip every other CPU.
 */
11589 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11590 continue;
11591
11592 buf = &bufs[cp->cpu_id];
11593
11594 /*
11595 * If there is already a buffer allocated for this CPU, it
11596 * is only possible that this is a DR event. In this case,
11597 * the buffer size must match our specified size.
11598 */
11599 if (buf->dtb_tomax != NULL) {
11600 ASSERT(buf->dtb_size == size);
11601 continue;
11602 }
11603
11604 ASSERT(buf->dtb_xamot == NULL);
11605
/*
 * Allocate the active buffer without sleeping; on failure, jump
 * to the err path below to undo any partial allocations.
 */
11606 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP_LAZY)) ==
11607 NULL)
11608 goto err;
11609
11610 buf->dtb_size = size;
11611 buf->dtb_flags = flags;
11612 buf->dtb_offset = 0;
11613 buf->dtb_drops = 0;
11614
/* Non-switching buffers need no second (xamot) buffer. */
11615 if (flags & DTRACEBUF_NOSWITCH)
11616 continue;
11617
11618 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP_LAZY)) ==
11619 NULL)
11620 goto err;
11621 } while ((cp = cp->cpu_next) != cpu_list);
11622
11623 return (0);
11624
11625 err:
/*
 * An allocation failed: re-walk the CPU list, freeing whatever was
 * allocated so far and tallying desired vs. allocated buffer counts.
 */
11626 cp = cpu_list;
11627
11628 do {
11629 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11630 continue;
11631
11632 buf = &bufs[cp->cpu_id];
/* Each CPU wants both a tomax and an xamot buffer. */
11633 desired += 2;
11634
11635 if (buf->dtb_xamot != NULL) {
11636 ASSERT(buf->dtb_tomax != NULL);
11637 ASSERT(buf->dtb_size == size);
11638 kmem_free(buf->dtb_xamot, size);
11639 allocated++;
13547 {
13548 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
13549 void *base;
13550 uintptr_t limit;
13551 dtrace_dynvar_t *dvar, *next, *start;
13552 int i;
13553
13554 ASSERT(MUTEX_HELD(&dtrace_lock));
13555 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
13556
/* Reset the entire dynamic-variable state before (re)initializing it. */
13557 bzero(dstate, sizeof (dtrace_dstate_t));
13558
/* A chunksize of zero means "use the default chunk size". */
13559 if ((dstate->dtds_chunksize = chunksize) == 0)
13560 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
13561
13562 VERIFY(dstate->dtds_chunksize < LONG_MAX);
13563
/* Guarantee room for at least one chunk plus one hash bucket. */
13564 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
13565 size = min;
13566
13567 if ((base = kmem_zalloc(size, KM_NOSLEEP_LAZY)) == NULL)
13568 return (ENOMEM);
13569
13570 dstate->dtds_size = size;
13571 dstate->dtds_base = base;
/* Per-CPU state comes from a cache and may sleep; zeroed explicitly. */
13572 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
13573 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
13574
13575 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
13576
/* If hashsize is odd (and not 1), round it down to an even value. */
13577 if (hashsize != 1 && (hashsize & 1))
13578 hashsize--;
13579
13580 dstate->dtds_hashsize = hashsize;
/* The hash table lives at the start of the allocated region. */
13581 dstate->dtds_hash = dstate->dtds_base;
13582
13583 /*
13584 * Set all of our hash buckets to point to the single sink, and (if
13585 * it hasn't already been set), set the sink's hash value to be the
13586 * sink sentinel value. The sink is needed for dynamic variable
13587 * lookups to know that they have iterated over an entire, valid hash
14074 rval = EACCES;
14075 goto out;
14076 }
14077
14078 dtrace_state_prereserve(state);
14079
14080 /*
14081 * Now we want to try to allocate our speculations.
14082 * We do not automatically resize the number of speculations; if
14083 * this fails, we will fail the operation.
14084 */
14085 nspec = opt[DTRACEOPT_NSPEC];
14086 ASSERT(nspec != DTRACEOPT_UNSET);
14087
/* dts_nspeculations is an int; reject counts that will not fit. */
14088 if (nspec > INT_MAX) {
14089 rval = ENOMEM;
14090 goto out;
14091 }
14092
14093 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t),
14094 KM_NOSLEEP_LAZY);
14095
14096 if (spec == NULL) {
14097 rval = ENOMEM;
14098 goto out;
14099 }
14100
14101 state->dts_speculations = spec;
14102 state->dts_nspeculations = (int)nspec;
14103
/* Allocate a buffer for each speculation; err undoes partial work. */
14104 for (i = 0; i < nspec; i++) {
14105 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP_LAZY)) == NULL) {
14106 rval = ENOMEM;
14107 goto err;
14108 }
14109
14110 spec[i].dtsp_buffer = buf;
14111 }
14112
/* Optionally adopt anonymous tracing state, if one exists and we have no ECBs yet. */
14113 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
14114 if (dtrace_anon.dta_state == NULL) {
14115 rval = ENOENT;
14116 goto out;
14117 }
14118
14119 if (state->dts_necbs != 0) {
14120 rval = EALREADY;
14121 goto out;
14122 }
14123
14124 state->dts_anon = dtrace_anon_grab();
14125 ASSERT(state->dts_anon != NULL);
|