| 1 | diff -u -r ../elfutils-0.159/libelf/elf_getarsym.c ./libelf/elf_getarsym.c |
| 2 | --- ../elfutils-0.159/libelf/elf_getarsym.c 2014-05-18 16:32:15.000000000 +0200 |
| 3 | +++ ./libelf/elf_getarsym.c 2014-05-30 14:53:58.602211085 +0200 |
| 4 | @@ -45,6 +45,124 @@ |
| 5 | #include <dl-hash.h> |
| 6 | #include "libelfP.h" |
| 7 | |
| 8 | +#ifdef __ANDROID__ |
| 9 | +/* Find the first occurrence of C in S. */ |
| 10 | +void * |
| 11 | +rawmemchr (const void *s, int c_in) |
| 12 | +{ |
| 13 | + /* On 32-bit hardware, choosing longword to be a 32-bit unsigned |
| 14 | + long instead of a 64-bit uintmax_t tends to give better |
| 15 | + performance. On 64-bit hardware, unsigned long is generally 64 |
| 16 | + bits already. Change this typedef to experiment with |
| 17 | + performance. */ |
| 18 | + typedef unsigned long int longword; |
| 19 | + |
| 20 | + const unsigned char *char_ptr; |
| 21 | + const longword *longword_ptr; |
| 22 | + longword repeated_one; |
| 23 | + longword repeated_c; |
| 24 | + unsigned char c; |
| 25 | + |
| 26 | + c = (unsigned char) c_in; |
| 27 | + |
| 28 | + /* Handle the first few bytes by reading one byte at a time. |
| 29 | + Do this until CHAR_PTR is aligned on a longword boundary. */ |
| 30 | + for (char_ptr = (const unsigned char *) s; |
| 31 | + (size_t) char_ptr % sizeof (longword) != 0; |
| 32 | + ++char_ptr) |
| 33 | + if (*char_ptr == c) |
| 34 | + return (void *) char_ptr; |
| 35 | + |
| 36 | + longword_ptr = (const longword *) char_ptr; |
| 37 | + |
| 38 | + /* All these elucidatory comments refer to 4-byte longwords, |
| 39 | + but the theory applies equally well to any size longwords. */ |
| 40 | + |
| 41 | + /* Compute auxiliary longword values: |
| 42 | + repeated_one is a value which has a 1 in every byte. |
| 43 | + repeated_c has c in every byte. */ |
| 44 | + repeated_one = 0x01010101; |
| 45 | + repeated_c = c | (c << 8); |
| 46 | + repeated_c |= repeated_c << 16; |
| 47 | + if (0xffffffffU < (longword) -1) |
| 48 | + { |
| 49 | + repeated_one |= repeated_one << 31 << 1; |
| 50 | + repeated_c |= repeated_c << 31 << 1; |
| 51 | + if (8 < sizeof (longword)) |
| 52 | + { |
| 53 | + size_t i; |
| 54 | + |
| 55 | + for (i = 64; i < sizeof (longword) * 8; i *= 2) |
| 56 | + { |
| 57 | + repeated_one |= repeated_one << i; |
| 58 | + repeated_c |= repeated_c << i; |
| 59 | + } |
| 60 | + } |
| 61 | + } |
| 62 | + |
| 63 | + /* Instead of the traditional loop which tests each byte, we will |
| 64 | + test a longword at a time. The tricky part is testing if *any of |
| 65 | + the four* bytes in the longword in question are equal to |
| 66 | + c. We first use an xor with repeated_c. This reduces the task |
| 67 | + to testing whether *any of the four* bytes in longword1 is zero. |
| 68 | + |
| 69 | + We compute tmp = |
| 70 | + ((longword1 - repeated_one) & ~longword1) & (repeated_one << 7). |
| 71 | + That is, we perform the following operations: |
| 72 | + 1. Subtract repeated_one. |
| 73 | + 2. & ~longword1. |
| 74 | + 3. & a mask consisting of 0x80 in every byte. |
| 75 | + Consider what happens in each byte: |
| 76 | + - If a byte of longword1 is zero, step 1 and 2 transform it into 0xff, |
| 77 | + and step 3 transforms it into 0x80. A carry can also be propagated |
| 78 | + to more significant bytes. |
| 79 | + - If a byte of longword1 is nonzero, let its lowest 1 bit be at |
| 80 | + position k (0 <= k <= 7); so the lowest k bits are 0. After step 1, |
| 81 | + the byte ends in a single bit of value 0 and k bits of value 1. |
| 82 | + After step 2, the result is just k bits of value 1: 2^k - 1. After |
| 83 | + step 3, the result is 0. And no carry is produced. |
| 84 | + So, if longword1 has only non-zero bytes, tmp is zero. |
| 85 | + Whereas if longword1 has a zero byte, call j the position of the least |
| 86 | + significant zero byte. Then the result has a zero at positions 0, ..., |
| 87 | + j-1 and a 0x80 at position j. We cannot predict the result at the more |
| 88 | + significant bytes (positions j+1..3), but it does not matter since we |
| 89 | + already have a non-zero bit at position 8*j+7. |
| 90 | + |
| 91 | + The test whether any byte in longword1 is zero is equivalent |
| 92 | + to testing whether tmp is nonzero. |
| 93 | + |
| 94 | + This test can read beyond the end of a string, depending on where |
| 95 | + C_IN is encountered. However, this is safe because the |
| 96 | + initialization phase ensured that the read is aligned; |
| 97 | + an aligned read cannot cross a page boundary and therefore |
| 98 | + cannot cause a fault. |
| 99 | + |
| 100 | + while (1) |
| 101 | + { |
| 102 | + longword longword1 = *longword_ptr ^ repeated_c; |
| 103 | + |
| 104 | + if ((((longword1 - repeated_one) & ~longword1) |
| 105 | + & (repeated_one << 7)) != 0) |
| 106 | + break; |
| 107 | + longword_ptr++; |
| 108 | + } |
| 109 | + |
| 110 | + char_ptr = (const unsigned char *) longword_ptr; |
| 111 | + |
| 112 | + /* At this point, we know that one of the sizeof (longword) bytes |
| 113 | + starting at char_ptr is == c. On little-endian machines, we |
| 114 | + could determine the first such byte without any further memory |
| 115 | + accesses, just by looking at the tmp result from the last loop |
| 116 | + iteration. But this does not work on big-endian machines. |
| 117 | + Choose code that works in both cases. */ |
| 118 | + |
| 119 | + char_ptr = (unsigned char *) longword_ptr; |
| 120 | + while (*char_ptr != c) |
| 121 | + char_ptr++; |
| 122 | + return (void *) char_ptr; |
| 123 | +} |
| 124 | +#endif |
| 125 | + |
| 126 | |
| 127 | static int |
| 128 | read_number_entries (uint64_t *nump, Elf *elf, size_t *offp, bool index64_p) |
| 129 | @@ -166,7 +284,7 @@ |
| 130 | |
| 131 | /* We have an archive. The first word in there is the number of |
| 132 | entries in the table. */ |
| 133 | - uint64_t n; |
| 134 | + uint64_t n = 0; |
| 135 | size_t off = elf->start_offset + SARMAG + sizeof (struct ar_hdr); |
| 136 | if (read_number_entries (&n, elf, &off, index64_p) < 0) |
| 137 | { |