php-src: Update alloc patch: Zend/zend_alloc.c

  109033
October 23, 2019 21:31 rasmus@php.net (Rasmus Lerdorf)
Commit:    5870efbcf5235bb7328fe7cea3b8e2b92fb9fc0d
Author:    Rasmus Lerdorf <rasmus@lerdorf.com>         Wed, 23 Oct 2019 14:31:27 -0700
Parents:   c744531fff9ee03c027ca3c18b21f3382023ff7e
Branches:  PHP-7.4

Link:       http://git.php.net/?p=php-src.git;a=commitdiff;h=5870efbcf5235bb7328fe7cea3b8e2b92fb9fc0d

Log:
Update alloc patch

Changed paths:
  M  Zend/zend_alloc.c


Diff:
diff --git a/Zend/zend_alloc.c b/Zend/zend_alloc.c
index 21ccf850496..a1d3ad680fa 100644
--- a/Zend/zend_alloc.c
+++ b/Zend/zend_alloc.c
@@ -195,6 +195,11 @@ typedef struct  _zend_mm_free_slot zend_mm_free_slot;
 typedef struct  _zend_mm_chunk     zend_mm_chunk;
 typedef struct  _zend_mm_huge_list zend_mm_huge_list;
 
+/*
+ * 0 means disabled
+ * 1 means huge pages
+ * 2 means transparent huge pages
+ */
 int zend_mm_use_huge_pages = 0;
 
 /*
@@ -229,6 +234,13 @@ int zend_mm_use_huge_pages = 0;
  *               2 for 5-8, 3 for 9-16 etc) see zend_alloc_sizes.h
  */
 
+/*
+ * For environments where mmap is expensive it can be
+ * worthwhile to avoid mmap/munmap churn by raising
+ * the minimum number of chunks in emalloc
+ */
+int zend_mm_min_chunks = 0;
+
 struct _zend_mm_heap {
 #if ZEND_MM_CUSTOM
 	int                use_custom_heap;
@@ -462,7 +474,7 @@ static void *zend_mm_mmap(size_t size)
 	void *ptr;
 
 #ifdef MAP_HUGETLB
-	if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
+	if (zend_mm_use_huge_pages == 1 && size == ZEND_MM_CHUNK_SIZE) {
 		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
 		if (ptr != MAP_FAILED) {
 			return ptr;
@@ -669,7 +681,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
 		return NULL;
 	} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
 #ifdef MADV_HUGEPAGE
-		if (zend_mm_use_huge_pages) {
+		if (zend_mm_use_huge_pages == 2) {
 			madvise(ptr, size, MADV_HUGEPAGE);
 		}
 #endif
@@ -702,7 +714,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
 			zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
 		}
 # ifdef MADV_HUGEPAGE
-		if (zend_mm_use_huge_pages) {
+		if (zend_mm_use_huge_pages == 2) {
 			madvise(ptr, size, MADV_HUGEPAGE);
 		}
 # endif
@@ -2270,7 +2282,7 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
 		zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
 	} else {
 		/* free some cached chunks to keep average count */
-		heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
+		heap->avg_chunks_count = MAX((heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0, zend_mm_min_chunks);
 		while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
 		       heap->cached_chunks) {
 			p = heap->cached_chunks;
@@ -2278,6 +2290,7 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
 			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
 			heap->cached_chunks_count--;
 		}
+
 		/* clear cached chunks */
 		p = heap->cached_chunks;
 		while (p != NULL) {
@@ -2759,8 +2772,16 @@ static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
 #endif
 
 	tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
-	if (tmp && zend_atoi(tmp, 0)) {
-		zend_mm_use_huge_pages = 1;
+    if (tmp) {
+		zend_mm_use_huge_pages = zend_atoi(tmp, 0);
+		if (zend_mm_use_huge_pages > 2) {
+			zend_mm_use_huge_pages = 1;
+		}
+	}
+
+	tmp = getenv("USE_ZEND_MIN_CHUNKS");
+	if (tmp) {
+		zend_mm_min_chunks = zend_atoi(tmp, 0);
 	}
 	alloc_globals->mm_heap = zend_mm_init();
 }
October 23, 2019 21:33 rasmus@lerdorf.com (Rasmus Lerdorf)
Crap, brain freeze. Pushed this patch from the wrong window. Reverting.

-Rasmus

On Wed, Oct 23, 2019 at 2:31 PM Rasmus Lerdorf <rasmus@php.net> wrote:

> Commit: 5870efbcf5235bb7328fe7cea3b8e2b92fb9fc0d > Author: Rasmus Lerdorf <rasmus@lerdorf.com> Wed, 23 Oct 2019 > 14:31:27 -0700 > Parents: c744531fff9ee03c027ca3c18b21f3382023ff7e > Branches: PHP-7.4 > > Link: > http://git.php.net/?p=php-src.git;a=commitdiff;h=5870efbcf5235bb7328fe7cea3b8e2b92fb9fc0d > > Log: > Update alloc patch > > Changed paths: > M Zend/zend_alloc.c > > > Diff: > diff --git a/Zend/zend_alloc.c b/Zend/zend_alloc.c > index 21ccf850496..a1d3ad680fa 100644 > --- a/Zend/zend_alloc.c > +++ b/Zend/zend_alloc.c > @@ -195,6 +195,11 @@ typedef struct _zend_mm_free_slot zend_mm_free_slot; > typedef struct _zend_mm_chunk zend_mm_chunk; > typedef struct _zend_mm_huge_list zend_mm_huge_list; > > +/* > + * 0 means disabled > + * 1 means huge pages > + * 2 means transparent huge pages > + */ > int zend_mm_use_huge_pages = 0; > > /* > @@ -229,6 +234,13 @@ int zend_mm_use_huge_pages = 0; > * 2 for 5-8, 3 for 9-16 etc) see zend_alloc_sizes.h > */ > > +/* > + * For environments where mmap is expensive it can be > + * worthwhile to avoid mmap/munmap churn by raising > + * the minimum number of chunks in emalloc > + */ > +int zend_mm_min_chunks = 0; > + > struct _zend_mm_heap { > #if ZEND_MM_CUSTOM > int use_custom_heap; > @@ -462,7 +474,7 @@ static void *zend_mm_mmap(size_t size) > void *ptr; > > #ifdef MAP_HUGETLB > - if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) { > + if (zend_mm_use_huge_pages == 1 && size == ZEND_MM_CHUNK_SIZE) { > ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE > | MAP_ANON | MAP_HUGETLB, -1, 0); > if (ptr != MAP_FAILED) { > return ptr; > @@ -669,7 +681,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, > size_t alignment) > return NULL; > } else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) { > #ifdef MADV_HUGEPAGE > - if (zend_mm_use_huge_pages) { > + if (zend_mm_use_huge_pages == 2) { > madvise(ptr, size, MADV_HUGEPAGE); > } > #endif > @@ -702,7 +714,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, > 
size_t alignment) > zend_mm_munmap((char*)ptr + size, alignment - > REAL_PAGE_SIZE); > } > # ifdef MADV_HUGEPAGE > - if (zend_mm_use_huge_pages) { > + if (zend_mm_use_huge_pages == 2) { > madvise(ptr, size, MADV_HUGEPAGE); > } > # endif > @@ -2270,7 +2282,7 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, > int silent) > zend_mm_chunk_free(heap, heap->main_chunk, > ZEND_MM_CHUNK_SIZE); > } else { > /* free some cached chunks to keep average count */ > - heap->avg_chunks_count = (heap->avg_chunks_count + > (double)heap->peak_chunks_count) / 2.0; > + heap->avg_chunks_count = MAX((heap->avg_chunks_count + > (double)heap->peak_chunks_count) / 2.0, zend_mm_min_chunks); > while ((double)heap->cached_chunks_count + 0.9 > > heap->avg_chunks_count && > heap->cached_chunks) { > p = heap->cached_chunks; > @@ -2278,6 +2290,7 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, > int silent) > zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE); > heap->cached_chunks_count--; > } > + > /* clear cached chunks */ > p = heap->cached_chunks; > while (p != NULL) { > @@ -2759,8 +2772,16 @@ static void alloc_globals_ctor(zend_alloc_globals > *alloc_globals) > #endif > > tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES"); > - if (tmp && zend_atoi(tmp, 0)) { > - zend_mm_use_huge_pages = 1; > + if (tmp) { > + zend_mm_use_huge_pages = zend_atoi(tmp, 0); > + if (zend_mm_use_huge_pages > 2) { > + zend_mm_use_huge_pages = 1; > + } > + } > + > + tmp = getenv("USE_ZEND_MIN_CHUNKS"); > + if (tmp) { > + zend_mm_min_chunks = zend_atoi(tmp, 0); > } > alloc_globals->mm_heap = zend_mm_init(); > } > > > -- > PHP CVS Mailing List (http://www.php.net/) > To unsubscribe, visit: http://www.php.net/unsub.php > >