Diffstat (limited to 'package')
-rw-r--r--  package/libs/libjson-c/Makefile                                                                |  2
-rw-r--r--  package/libs/libjson-c/patches/001-Protect-array_list_del_idx-against-size_t-overflow.patch     | 27
-rw-r--r--  package/libs/libjson-c/patches/002-Prevent-division-by-zero-in-linkhash.patch                   | 32
-rw-r--r--  package/libs/libjson-c/patches/003-Fix-integer-overflows.patch                                  | 86
-rw-r--r--  package/libs/libjson-c/patches/004-Issue-599-Fix-the-backwards-check-in-lh_table_insert.patch   | 29
5 files changed, 175 insertions(+), 1 deletion(-)
diff --git a/package/libs/libjson-c/Makefile b/package/libs/libjson-c/Makefile
index b61d99cc99..f02518310a 100644
--- a/package/libs/libjson-c/Makefile
+++ b/package/libs/libjson-c/Makefile
@@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk
PKG_NAME:=json-c
PKG_VERSION:=0.13.1
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-nodoc.tar.gz
PKG_SOURCE_URL:=https://s3.amazonaws.com/json-c_releases/releases/
diff --git a/package/libs/libjson-c/patches/001-Protect-array_list_del_idx-against-size_t-overflow.patch b/package/libs/libjson-c/patches/001-Protect-array_list_del_idx-against-size_t-overflow.patch
new file mode 100644
index 0000000000..456fbf35ff
--- /dev/null
+++ b/package/libs/libjson-c/patches/001-Protect-array_list_del_idx-against-size_t-overflow.patch
@@ -0,0 +1,27 @@
+From 099016b7e8d70a6d5dd814e788bba08d33d48426 Mon Sep 17 00:00:00 2001
+From: Tobias Stoeckmann <tobias@stoeckmann.org>
+Date: Mon, 4 May 2020 19:41:16 +0200
+Subject: [PATCH 1/2] Protect array_list_del_idx against size_t overflow.
+
+If the assignment of stop overflows due to idx and count being
+larger than SIZE_T_MAX in sum, out of boundary access could happen.
+
+It takes invalid usage of this function for this to happen, but
+I decided to add this check so array_list_del_idx is as safe against
+bad usage as the other arraylist functions.
+---
+ arraylist.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arraylist.c
++++ b/arraylist.c
+@@ -135,6 +135,9 @@ array_list_del_idx( struct array_list *a
+ {
+ size_t i, stop;
+
++ /* Avoid overflow in calculation with large indices. */
++ if (idx > SIZE_T_MAX - count)
++ return -1;
+ stop = idx + count;
+ if ( idx >= arr->length || stop > arr->length ) return -1;
+ for ( i = idx; i < stop; ++i ) {
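
[Editor note] A minimal standalone sketch (not part of the patch; plain ISO C, using the standard SIZE_MAX macro rather than json-c's SIZE_T_MAX alias) of why the guard above is needed: with both operands being size_t, idx + count silently wraps around, so the existing bounds check against the array length would pass.

/* Sketch only: demonstrates the size_t wrap-around the guard rejects. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	size_t idx = SIZE_MAX - 1, count = 4;

	/* Without the guard, idx + count wraps to 2, so a check like
	 * "stop > length" no longer catches the out-of-range request. */
	size_t stop = idx + count;
	printf("wrapped stop = %zu\n", stop);

	/* The guard from the patch, written with the standard SIZE_MAX: */
	if (idx > SIZE_MAX - count)
		printf("request rejected before the wrap can happen\n");
	return 0;
}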
diff --git a/package/libs/libjson-c/patches/002-Prevent-division-by-zero-in-linkhash.patch b/package/libs/libjson-c/patches/002-Prevent-division-by-zero-in-linkhash.patch
new file mode 100644
index 0000000000..d37fe5857b
--- /dev/null
+++ b/package/libs/libjson-c/patches/002-Prevent-division-by-zero-in-linkhash.patch
@@ -0,0 +1,32 @@
+From 77d935b7ae7871a1940cd827e850e6063044ec45 Mon Sep 17 00:00:00 2001
+From: Tobias Stoeckmann <tobias@stoeckmann.org>
+Date: Mon, 4 May 2020 19:46:45 +0200
+Subject: [PATCH 2/2] Prevent division by zero in linkhash.
+
+If a linkhash with a size of zero is created, then modulo operations
+are prone to division by zero operations.
+
+Purely protective measure against bad usage.
+---
+ linkhash.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/linkhash.c
++++ b/linkhash.c
+@@ -12,6 +12,7 @@
+
+ #include "config.h"
+
++#include <assert.h>
+ #include <stdio.h>
+ #include <string.h>
+ #include <stdlib.h>
+@@ -498,6 +499,8 @@ struct lh_table* lh_table_new(int size,
+ int i;
+ struct lh_table *t;
+
++ /* Allocate space for elements to avoid divisions by zero. */
++ assert(size > 0);
+ t = (struct lh_table*)calloc(1, sizeof(struct lh_table));
+ if (!t)
+ return NULL;
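
[Editor note] As a minimal standalone sketch (not json-c code), this is the failure mode the assert above rules out: bucket lookups in a hash table take the hash modulo the table size, so a zero-sized table turns every lookup into a division by zero.

/* Sketch only: bucket selection by "hash % size" as in a linked-hash table. */
#include <assert.h>
#include <stdio.h>

static unsigned long bucket_for(unsigned long hash, int size)
{
	assert(size > 0); /* same guard the patch adds to lh_table_new() */
	return hash % (unsigned long)size;
}

int main(void)
{
	printf("bucket = %lu\n", bucket_for(12345UL, 8));
	/* bucket_for(12345UL, 0) would abort here instead of dividing by zero */
	return 0;
}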
diff --git a/package/libs/libjson-c/patches/003-Fix-integer-overflows.patch b/package/libs/libjson-c/patches/003-Fix-integer-overflows.patch
new file mode 100644
index 0000000000..2fac62df59
--- /dev/null
+++ b/package/libs/libjson-c/patches/003-Fix-integer-overflows.patch
@@ -0,0 +1,86 @@
+From d07b91014986900a3a75f306d302e13e005e9d67 Mon Sep 17 00:00:00 2001
+From: Tobias Stoeckmann <tobias@stoeckmann.org>
+Date: Mon, 4 May 2020 19:47:25 +0200
+Subject: [PATCH] Fix integer overflows.
+
+The data structures linkhash and printbuf are limited to 2 GB in size
+due to a signed integer being used to track their current size.
+
+If too much data is added, then size variable can overflow, which is
+an undefined behaviour in C programming language.
+
+Assuming that a signed int overflow just leads to a negative value,
+like it happens on many sytems (Linux i686/amd64 with gcc), then
+printbuf is vulnerable to an out of boundary write on 64 bit systems.
+---
+ linkhash.c | 7 +++++--
+ printbuf.c | 19 ++++++++++++++++---
+ 2 files changed, 21 insertions(+), 5 deletions(-)
+
+--- a/linkhash.c
++++ b/linkhash.c
+@@ -579,9 +579,12 @@ int lh_table_insert_w_hash(struct lh_tab
+ {
+ unsigned long n;
+
+- if (t->count >= t->size * LH_LOAD_FACTOR)
+- if (lh_table_resize(t, t->size * 2) != 0)
++ if (t->count >= t->size * LH_LOAD_FACTOR) {
++ /* Avoid signed integer overflow with large tables. */
++ int new_size = INT_MAX / 2 < t->size ? t->size * 2 : INT_MAX;
++ if (t->size == INT_MAX || lh_table_resize(t, new_size) != 0)
+ return -1;
++ }
+
+ n = h % t->size;
+
+--- a/printbuf.c
++++ b/printbuf.c
+@@ -15,6 +15,7 @@
+
+ #include "config.h"
+
++#include <limits.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+@@ -65,9 +66,16 @@ static int printbuf_extend(struct printb
+ if (p->size >= min_size)
+ return 0;
+
+- new_size = p->size * 2;
+- if (new_size < min_size + 8)
+- new_size = min_size + 8;
++ /* Prevent signed integer overflows with large buffers. */
++ if (min_size > INT_MAX - 8)
++ return -1;
++ if (p->size > INT_MAX / 2)
++ new_size = min_size + 8;
++ else {
++ new_size = p->size * 2;
++ if (new_size < min_size + 8)
++ new_size = min_size + 8;
++ }
+ #ifdef PRINTBUF_DEBUG
+ MC_DEBUG("printbuf_memappend: realloc "
+ "bpos=%d min_size=%d old_size=%d new_size=%d\n",
+@@ -82,6 +90,9 @@ static int printbuf_extend(struct printb
+
+ int printbuf_memappend(struct printbuf *p, const char *buf, int size)
+ {
++ /* Prevent signed integer overflows with large buffers. */
++ if (size > INT_MAX - p->bpos - 1)
++ return -1;
+ if (p->size <= p->bpos + size + 1) {
+ if (printbuf_extend(p, p->bpos + size + 1) < 0)
+ return -1;
+@@ -98,6 +109,9 @@ int printbuf_memset(struct printbuf *pb,
+
+ if (offset == -1)
+ offset = pb->bpos;
++ /* Prevent signed integer overflows with large buffers. */
++ if (len > INT_MAX - offset)
++ return -1;
+ size_needed = offset + len;
+ if (pb->size < size_needed)
+ {
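
[Editor note] The growth policy introduced above can be read in isolation; this standalone sketch (not json-c code) shows the same clamping logic: double the buffer, but bail out or fall back before a signed int can overflow.

/* Sketch only: overflow-safe "double or clamp" growth, as in printbuf_extend(). */
#include <limits.h>
#include <stdio.h>

static int next_size(int cur, int min_size)
{
	int new_size;

	if (min_size > INT_MAX - 8)   /* even min_size + 8 would overflow */
		return -1;
	if (cur > INT_MAX / 2)        /* cur * 2 would overflow, skip doubling */
		new_size = min_size + 8;
	else {
		new_size = cur * 2;
		if (new_size < min_size + 8)
			new_size = min_size + 8;
	}
	return new_size;
}

int main(void)
{
	printf("%d\n", next_size(1024, 1500));       /* prints 2048 */
	printf("%d\n", next_size(INT_MAX - 4, 100)); /* prints 108, no overflow */
	return 0;
}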
diff --git a/package/libs/libjson-c/patches/004-Issue-599-Fix-the-backwards-check-in-lh_table_insert.patch b/package/libs/libjson-c/patches/004-Issue-599-Fix-the-backwards-check-in-lh_table_insert.patch
new file mode 100644
index 0000000000..aed6918e70
--- /dev/null
+++ b/package/libs/libjson-c/patches/004-Issue-599-Fix-the-backwards-check-in-lh_table_insert.patch
@@ -0,0 +1,29 @@
+From 519dfe1591d85432986f9762d41d1a883198c157 Mon Sep 17 00:00:00 2001
+From: Eric Haszlakiewicz <erh+git@nimenees.com>
+Date: Sun, 10 May 2020 03:32:19 +0000
+Subject: [PATCH] Issue #599: Fix the backwards check in
+ lh_table_insert_w_hash() that was preventing adding more than 11 objects. Add
+ a test to check for this too.
+
+---
+ linkhash.c | 2 +-
+ tests/test4.c | 29 +++++++++++++++++++++++++++++
+ tests/test4.expected | 1 +
+ 3 files changed, 31 insertions(+), 1 deletion(-)
+
+diff --git a/linkhash.c b/linkhash.c
+index 51e90b1..f930efd 100644
+--- a/linkhash.c
++++ b/linkhash.c
+@@ -582,7 +582,7 @@ int lh_table_insert_w_hash(struct lh_table *t, const void *k, const void *v, con
+
+ if (t->count >= t->size * LH_LOAD_FACTOR) {
+ /* Avoid signed integer overflow with large tables. */
+- int new_size = INT_MAX / 2 < t->size ? t->size * 2 : INT_MAX;
++ int new_size = (t->size > INT_MAX / 2) ? INT_MAX : (t->size * 2);
+ if (t->size == INT_MAX || lh_table_resize(t, new_size) != 0)
+ return -1;
+ }
+--
+2.26.2
+
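
[Editor note] A standalone sketch (not json-c code) of what patch 004 corrects: the ternary added in patch 003 had its branches swapped, so a small table asked to grow straight to INT_MAX buckets (and the subsequent resize failed), while the fixed form doubles normally and only clamps when doubling would overflow.

/* Sketch only: the swapped branches from patch 003 vs. the fix in patch 004. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	int size = 16; /* a small table, well under INT_MAX / 2 */

	int backwards = INT_MAX / 2 < size ? size * 2 : INT_MAX; /* patch 003 */
	int fixed     = size > INT_MAX / 2 ? INT_MAX : size * 2; /* patch 004 */

	printf("backwards: %d, fixed: %d\n", backwards, fixed);  /* INT_MAX vs. 32 */
	return 0;
}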