Source-Changes-HG archive


[src/trunk]: src/games/monop use an sbrk() only malloc() because save and res...



details:   https://anonhg.NetBSD.org/src/rev/49279f81e470
branches:  trunk
changeset: 546146:49279f81e470
user:      christos <christos%NetBSD.org@localhost>
date:      Mon Apr 21 01:23:06 2003 +0000

description:
use an sbrk()-only malloc(), because save and restore depend on saving
the data segment only.  Maybe we should make a libbsdmalloc?
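
For context, the save/restore code this refers to dumps the program's data
segment as a single image.  A minimal sketch of that idea follows; it is not
code from the monop tree, and save_data_segment() is an illustrative name,
though the linker-provided `end' symbol is the traditional one (see end(3)):

#include <fcntl.h>
#include <unistd.h>

extern char end;			/* traditional end-of-BSS symbol */

/*
 * Sketch only: write everything between the end of BSS and the current
 * program break to a file.  Every object handed out by an sbrk()-based
 * malloc() lies inside this range, so the image can later be restored by
 * growing the break back to the saved size and reading it in.  An
 * mmap()-backed allocator would place blocks outside this range and
 * defeat the scheme.
 */
static int
save_data_segment(const char *file)
{
	char *brk_now = sbrk(0);	/* current top of the data segment */
	int fd;

	if ((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, 0600)) == -1)
		return (-1);
	if (write(fd, &end, (size_t)(brk_now - &end)) == -1) {
		(void)close(fd);
		return (-1);
	}
	return (close(fd));
}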

diffstat:

 games/monop/Makefile |    6 +-
 games/monop/malloc.c |  497 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 500 insertions(+), 3 deletions(-)

diffs (truncated from 519 to 300 lines):

diff -r 10a1174ea689 -r 49279f81e470 games/monop/Makefile
--- a/games/monop/Makefile      Mon Apr 21 00:55:17 2003 +0000
+++ b/games/monop/Makefile      Mon Apr 21 01:23:06 2003 +0000
@@ -1,11 +1,11 @@
-#      $NetBSD: Makefile,v 1.22 2002/03/05 21:30:41 thorpej Exp $
+#      $NetBSD: Makefile,v 1.23 2003/04/21 01:23:07 christos Exp $
 #      @(#)Makefile    8.1 (Berkeley) 5/31/93
 
 .include <bsd.own.mk>
 
 PROG=  monop
-SRCS=  monop.c cards.c execute.c getinp.c houses.c jail.c misc.c morg.c \
-       print.c prop.c rent.c roll.c spec.c trade.c
+SRCS=  monop.c cards.c execute.c getinp.c houses.c jail.c malloc.c misc.c \
+       morg.c print.c prop.c rent.c roll.c spec.c trade.c
 MAN=   monop.6
 HIDEGAME=hidegame
 CLEANFILES+=initdeck cards.pck
diff -r 10a1174ea689 -r 49279f81e470 games/monop/malloc.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/games/monop/malloc.c      Mon Apr 21 01:23:06 2003 +0000
@@ -0,0 +1,497 @@
+/*     $NetBSD: malloc.c,v 1.1 2003/04/21 01:23:06 christos Exp $      */
+
+/*
+ * Copyright (c) 1983, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *     This product includes software developed by the University of
+ *     California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)malloc.c   8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: malloc.c,v 1.1 2003/04/21 01:23:06 christos Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * malloc.c (Caltech) 2/21/82
+ * Chris Kingsley, kingsley@cit-20.
+ *
+ * This is a very fast storage allocator.  It allocates blocks of a small 
+ * number of different sizes, and keeps free lists of each size.  Blocks that
+ * don't exactly fit are passed up to the next larger size.  In this 
+ * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
+ * This is designed for use in a virtual memory environment.
+ */
+
+#include <sys/types.h>
+#if defined(DEBUG) || defined(RCHECK)
+#include <sys/uio.h>
+#endif
+#if defined(RCHECK) || defined(MSTATS)
+#include <stdio.h>
+#endif
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <threadlib.h>
+
+
+/*
+ * The overhead on a block is at least 4 bytes.  When free, this space
+ * contains a pointer to the next free block, and the bottom two bits must
+ * be zero.  When in use, the first byte is set to MAGIC, and the second
+ * byte is the size index.  The remaining bytes are for alignment.
+ * If range checking is enabled then a second word holds the size of the
+ * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
+ * The order of elements is critical: ov_magic must overlay the low order
+ * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
+ */
+union  overhead {
+       union   overhead *ov_next;      /* when free */
+       struct {
+               u_char  ovu_magic;      /* magic number */
+               u_char  ovu_index;      /* bucket # */
+#ifdef RCHECK
+               u_short ovu_rmagic;     /* range magic number */
+               u_long  ovu_size;       /* actual block size */
+#endif
+       } ovu;
+#define        ov_magic        ovu.ovu_magic
+#define        ov_index        ovu.ovu_index
+#define        ov_rmagic       ovu.ovu_rmagic
+#define        ov_size         ovu.ovu_size
+};
+
+#define        MAGIC           0xef            /* magic # on accounting info */
+#ifdef RCHECK
+#define RMAGIC         0x5555          /* magic # on range info */
+#endif
+
+#ifdef RCHECK
+#define        RSLOP           sizeof (u_short)
+#else
+#define        RSLOP           0
+#endif
+
+/*
+ * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
+ * smallest allocatable block is 8 bytes.  The overhead information
+ * precedes the data area returned to the user.
+ */
+#define        NBUCKETS 30
+static union overhead *nextf[NBUCKETS];
+
+static long pagesz;                    /* page size */
+static int pagebucket;                 /* page size bucket */
+
+#ifdef MSTATS
+/*
+ * nmalloc[i] is the difference between the number of mallocs and frees
+ * for a given block size.
+ */
+static u_int nmalloc[NBUCKETS];
+#endif
+
+static mutex_t malloc_mutex = MUTEX_INITIALIZER;
+
+static void morecore __P((int));
+static int findbucket __P((union overhead *, int));
+#ifdef MSTATS
+void mstats __P((const char *));
+#endif
+
+#if defined(DEBUG) || defined(RCHECK)
+#define        ASSERT(p)   if (!(p)) botch(__STRING(p))
+
+static void botch __P((const char *));
+
+/*
+ * NOTE: since this may be called while malloc_mutex is locked, stdio must not
+ *       be used in this function.
+ */
+static void
+botch(s)
+       const char *s;
+{
+       struct iovec iov[3];
+
+       iov[0].iov_base = "\nassertion botched: ";
+       iov[0].iov_len  = 20;
+       iov[1].iov_base = (void *)s;
+       iov[1].iov_len  = strlen(s);
+       iov[2].iov_base = "\n";
+       iov[2].iov_len  = 1;
+
+       /*
+        * This place deserves a word of warning: a cancellation point will
+        * occur when executing writev(), and we might be still owning
+        * malloc_mutex.  At this point we need to disable cancellation
+        * until `after' abort() because i) establishing a cancellation handler
+        * might, depending on the implementation, result in another malloc()
+        * to be executed, and ii) it is really not desirable to let execution
+        * continue.  `Fix me.'
+        * 
+        * Note that holding mutex_lock during abort() is safe.
+        */
+
+       (void)writev(STDERR_FILENO, iov, 3);
+       abort();
+}
+#else
+#define        ASSERT(p)
+#endif
+
+void *
+malloc(nbytes)
+       size_t nbytes;
+{
+       union overhead *op;
+       int bucket;
+       long n;
+       unsigned amt;
+
+       mutex_lock(&malloc_mutex);
+
+       /*
+        * First time malloc is called, setup page size and
+        * align break pointer so all data will be page aligned.
+        */
+       if (pagesz == 0) {
+               pagesz = n = getpagesize();
+               ASSERT(pagesz > 0);
+               op = (union overhead *)(void *)sbrk(0);
+               n = n - sizeof (*op) - ((long)op & (n - 1));
+               if (n < 0)
+                       n += pagesz;
+               if (n) {
+                       if (sbrk((int)n) == (void *)-1) {
+                               mutex_unlock(&malloc_mutex);
+                               return (NULL);
+                       }
+               }
+               bucket = 0;
+               amt = 8;
+               while (pagesz > amt) {
+                       amt <<= 1;
+                       bucket++;
+               }
+               pagebucket = bucket;
+       }
+       /*
+        * Convert amount of memory requested into closest block size
+        * stored in hash buckets which satisfies request.
+        * Account for space used per block for accounting.
+        */
+       if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) {
+#ifndef RCHECK
+               amt = 8;        /* size of first bucket */
+               bucket = 0;
+#else
+               amt = 16;       /* size of first bucket */
+               bucket = 1;
+#endif
+               n = -((long)sizeof (*op) + RSLOP);
+       } else {
+               amt = (unsigned)pagesz;
+               bucket = pagebucket;
+       }
+       while (nbytes > amt + n) {
+               amt <<= 1;
+               if (amt == 0)
+                       return (NULL);
+               bucket++;
+       }
+       /*
+        * If nothing in hash bucket right now,
+        * request more memory from the system.
+        */
+       if ((op = nextf[bucket]) == NULL) {
+               morecore(bucket);
+               if ((op = nextf[bucket]) == NULL) {
+                       mutex_unlock(&malloc_mutex);
+                       return (NULL);
+               }
+       }
+       /* remove from linked list */
+       nextf[bucket] = op->ov_next;
+       op->ov_magic = MAGIC;
+       op->ov_index = bucket;
+#ifdef MSTATS
+       nmalloc[bucket]++;
+#endif
+       mutex_unlock(&malloc_mutex);
+#ifdef RCHECK
+       /*
+        * Record allocated size of block and
+        * bound space with magic numbers.
+        */
+       op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
+       op->ov_rmagic = RMAGIC;
+       *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
+#endif
+       return ((void *)(op + 1));
+}
+
+/*
+ * Allocate more memory to the indicated bucket.
+ */
+static void
+morecore(bucket)
+       int bucket;
+{
+       union overhead *op;
+       long sz;                /* size of desired block */
+       long amt;                       /* amount to allocate */
+       long nblks;                     /* how many blocks we get */
+
+       /*
+        * sbrk_size <= 0 only for big, FLUFFY, requests (about



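As a rough, standalone illustration of the bucket scheme the header comment
describes (bucket i holds blocks of 2^(i+3) bytes, with at least 4 bytes of
per-block overhead), the snippet below maps a small request to its bucket the
same way the search loop in malloc() does.  It is not part of the patch: it
covers only requests smaller than a page, assumes the minimum 4-byte overhead,
and ignores the RCHECK padding and the pagebucket fast path.

#include <stdio.h>

#define	OVERHEAD	4	/* assumed minimum per-block overhead */

/*
 * Smallest bucket i such that a 2^(i+3)-byte block holds the request
 * plus the per-block overhead.  Mirrors the small-request search loop
 * in malloc() above.
 */
static int
bucket_for(size_t nbytes)
{
	size_t amt = 8;		/* size of the first bucket */
	int bucket = 0;

	while (nbytes + OVERHEAD > amt) {
		amt <<= 1;
		bucket++;
	}
	return (bucket);
}

int
main(void)
{
	size_t sizes[] = { 1, 8, 100, 1000, 2000 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int b = bucket_for(sizes[i]);
		printf("%zu bytes -> bucket %d (%zu-byte block)\n",
		    sizes[i], b, (size_t)8 << b);
	}
	return (0);
}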