-rw-r--r--  src/android_je_mallinfo.c  73
1 file changed, 39 insertions, 34 deletions
diff --git a/src/android_je_mallinfo.c b/src/android_je_mallinfo.c
index 8a7ff23..53bf664 100644
--- a/src/android_je_mallinfo.c
+++ b/src/android_je_mallinfo.c
@@ -14,6 +14,37 @@
* limitations under the License.
*/
+static size_t accumulate_large_allocs(arena_t* arena) {
+ size_t total_bytes = 0;
+
+ /* Accumulate the large allocation stats.
+ * Do not include stats.allocated_large; it is only updated by
+ * arena_stats_merge, and would include the data counted below.
+ */
+ for (unsigned j = 0; j < NSIZES - NBINS; j++) {
+ /* Read ndalloc first so that we guarantee nmalloc >= ndalloc. */
+ uint64_t ndalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].ndalloc);
+ uint64_t nmalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].nmalloc);
+ size_t allocs = (size_t)(nmalloc - ndalloc);
+ total_bytes += sz_index2size(NBINS + j) * allocs;
+ }
+ return total_bytes;
+}
+
+static size_t accumulate_small_allocs(arena_t* arena) {
+ size_t total_bytes = 0;
+ for (unsigned j = 0; j < NBINS; j++) {
+ bin_t* bin = &arena->bins[j];
+
+ /* NOTE: This includes allocations cached on every thread. */
+ malloc_mutex_lock(TSDN_NULL, &bin->lock);
+ total_bytes += bin_infos[j].reg_size * bin->stats.curregs;
+ malloc_mutex_unlock(TSDN_NULL, &bin->lock);
+ }
+ return total_bytes;
+}
+
+
/* Only use bin locks since the stats are now all atomic and can be read
* without taking the stats lock.
*/
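
The "read ndalloc first" comment above is load-bearing: both counters only ever grow, and a large deallocation can only follow its matching allocation, so loading ndalloc before nmalloc guarantees the unsigned subtraction never wraps. A minimal standalone sketch of the same snapshot pattern, with illustrative names (large_stats_t and live_allocs are not jemalloc identifiers):

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for jemalloc's per-size-class large-allocation counters. */
typedef struct {
  _Atomic uint64_t nmalloc;  /* bumped on every large allocation */
  _Atomic uint64_t ndalloc;  /* bumped on every large deallocation */
} large_stats_t;

static size_t live_allocs(large_stats_t* s) {
  /* Load ndalloc first: ndalloc trails nmalloc, so the later nmalloc
   * load is guaranteed to be >= the value read here. The opposite
   * order could pair a fresh ndalloc with a stale nmalloc, and the
   * unsigned difference would wrap to a huge bogus live count. */
  uint64_t ndalloc = atomic_load_explicit(&s->ndalloc, memory_order_acquire);
  uint64_t nmalloc = atomic_load_explicit(&s->nmalloc, memory_order_acquire);
  return (size_t)(nmalloc - ndalloc);
}
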
@@ -27,27 +58,8 @@ struct mallinfo je_mallinfo() {
if (arena != NULL) {
mi.hblkhd += atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);
- /* Accumulate the small bins. */
- for (unsigned j = 0; j < NBINS; j++) {
- bin_t* bin = &arena->bins[j];
-
- /* NOTE: This includes allocations cached on every thread. */
- malloc_mutex_lock(TSDN_NULL, &bin->lock);
- mi.uordblks += bin_infos[j].reg_size * bin->stats.curregs;
- malloc_mutex_unlock(TSDN_NULL, &bin->lock);
- }
-
- /* Accumulate the large allocation stats.
- * Do not include stats.allocated_large, it is only updated by
- * arena_stats_merge, and would include the data counted below.
- */
- for (unsigned j = 0; j < NSIZES - NBINS; j++) {
- /* Read ndalloc first so that we guarantee nmalloc >= ndalloc. */
- uint64_t ndalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].ndalloc);
- uint64_t nmalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].nmalloc);
- size_t allocs = (size_t)(nmalloc - ndalloc);
- mi.uordblks += sz_index2size(NBINS + j) * allocs;
- }
+ mi.uordblks += accumulate_small_allocs(arena);
+ mi.uordblks += accumulate_large_allocs(arena);
}
}
malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
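
With both accumulation loops factored out, je_mallinfo() reduces to one helper call per category. Reconstructed, the post-patch function reads roughly as below; the arena-iteration scaffolding is unchanged context that the diff does not show, so treat this as a sketch rather than the verbatim result:

struct mallinfo je_mallinfo() {
  struct mallinfo mi;
  memset(&mi, 0, sizeof(mi));

  malloc_mutex_lock(TSDN_NULL, &arenas_lock);
  for (unsigned i = 0; i < narenas_auto; i++) {
    arena_t* arena = atomic_load_p(&arenas[i], ATOMIC_ACQUIRE);
    if (arena != NULL) {
      /* Mapped bytes, plus live small and large allocation bytes. */
      mi.hblkhd += atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);
      mi.uordblks += accumulate_small_allocs(arena);
      mi.uordblks += accumulate_large_allocs(arena);
    }
  }
  malloc_mutex_unlock(TSDN_NULL, &arenas_lock);

  return mi;
}
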
@@ -56,15 +68,15 @@ struct mallinfo je_mallinfo() {
return mi;
}
-size_t __mallinfo_narenas() {
+size_t je_mallinfo_narenas() {
return narenas_auto;
}
-size_t __mallinfo_nbins() {
+size_t je_mallinfo_nbins() {
return NBINS;
}
-struct mallinfo __mallinfo_arena_info(size_t aidx) {
+struct mallinfo je_mallinfo_arena_info(size_t aidx) {
struct mallinfo mi;
memset(&mi, 0, sizeof(mi));
@@ -73,22 +85,15 @@ struct mallinfo __mallinfo_arena_info(size_t aidx) {
arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);
if (arena != NULL) {
mi.hblkhd = atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);
- mi.ordblks = atomic_load_zu(&arena->stats.allocated_large, ATOMIC_ACQUIRE);
-
- for (unsigned j = 0; j < NBINS; j++) {
- bin_t* bin = &arena->bins[j];
-
- malloc_mutex_lock(TSDN_NULL, &bin->lock);
- mi.fsmblks += bin_infos[j].reg_size * bin->stats.curregs;
- malloc_mutex_unlock(TSDN_NULL, &bin->lock);
- }
+ mi.ordblks = accumulate_large_allocs(arena);
+ mi.fsmblks = accumulate_small_allocs(arena);
}
}
malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
return mi;
}
-struct mallinfo __mallinfo_bin_info(size_t aidx, size_t bidx) {
+struct mallinfo je_mallinfo_bin_info(size_t aidx, size_t bidx) {
struct mallinfo mi;
memset(&mi, 0, sizeof(mi));
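
Taken together, the renamed entry points let a caller walk the per-arena breakdown without going through the aggregate je_mallinfo(). A hypothetical caller-side sketch follows; dump_arena_usage is illustrative, not part of this patch, and it assumes the je_mallinfo_* symbols are visible to the caller and that struct mallinfo carries size_t fields as on Android:

#include <malloc.h>
#include <stdio.h>

/* Prototypes for the entry points as declared by this patch. */
size_t je_mallinfo_narenas();
struct mallinfo je_mallinfo_arena_info(size_t aidx);

static void dump_arena_usage(void) {
  size_t narenas = je_mallinfo_narenas();
  for (size_t i = 0; i < narenas; i++) {
    struct mallinfo mi = je_mallinfo_arena_info(i);
    /* hblkhd: mapped bytes; ordblks: live large bytes; fsmblks: live
     * small bytes (including blocks cached by every thread). */
    printf("arena %zu: mapped=%zu large=%zu small=%zu\n",
           i, (size_t)mi.hblkhd, (size_t)mi.ordblks, (size_t)mi.fsmblks);
  }
}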