WvStreams
wvbufferstore.cc
/*
 * Worldvisions Weaver Software:
 *   Copyright (C) 1997-2002 Net Integration Technologies, Inc.
 *
 * Defines basic buffer storage classes.
 * These are not intended for use directly by clients.
 * See "wvbufbase.h" for the public API.
 */
#include "wvbufstore.h"
#include <string.h>
#include <sys/types.h>

/**
 * An abstraction for memory transfer operations.
 */
struct MemOps
{
    /** Copies an initialized region to an uninitialized region. */
    inline void uninit_copy(void *target, const void *source,
        size_t count)
    {
        memcpy(target, source, count);
    }
    /** Copies an initialized region to an initialized region. */
    inline void copy(void *target, const void *source, size_t count)
    {
        uninit(target, count);
        memcpy(target, source, count);
    }
    /** Moves an initialized region to an uninitialized region. */
    inline void uninit_move(void *target, void *source,
        size_t count)
    {
        memmove(target, source, count);
        uninit(source, count);
    }
    /** Swaps two initialized regions byte by byte. */
    inline void swap(void *target, void *source, size_t count)
    {
        register unsigned char *t1 = (unsigned char*)target;
        register unsigned char *t2 = (unsigned char*)source;
        while (count-- > 0)
        {
            register unsigned char temp;
            temp = *t1;
            *(t1++) = *t2;
            *(t2++) = temp;
        }
    }
    /** Uninitializes a region (a no-op for plain bytes). */
    inline void uninit(void *target, size_t count)
    {
    }
    /** Creates a new array of count bytes. */
    inline void *newarray(size_t count)
    {
        return new unsigned char[count];
    }
    /** Deletes an uninitialized array. */
    inline void deletearray(void *buf)
    {
        deletev (unsigned char*)buf;
    }
} memops;

/** Rounds a value up to the next multiple of a boundary. */
inline size_t roundup(size_t value, size_t boundary)
{
    size_t mod = value % boundary;
    return mod ? value + boundary - mod : value;
}
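
// Quick worked example (illustrative): roundup() grows sizes to whole
// granularity units, e.g. roundup(10, 4) == 12, roundup(8, 4) == 8 and
// roundup(0, 4) == 0, so newbuffer() below never allocates a partial granule.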


/***** WvBufStore *****/

WvBufStore::WvBufStore(int _granularity) :
    granularity(_granularity)
{
}


size_t WvBufStore::peekable(int offset) const
{
    if (offset == 0)
    {
        return used();
    }
    else if (offset < 0)
    {
        if (size_t(-offset) <= ungettable())
            return size_t(-offset) + used();
    }
    else
    {
        int avail = int(used()) - offset;
        if (avail > 0)
            return avail;
    }
    return 0; // out-of-bounds
}

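// Offset convention sketch (illustrative): offset 0 is the next byte get()
// would return, positive offsets index further into the unread data, and
// negative offsets reach back into the unget() history.  For example, after
// get(4) on an in-place store holding 10 bytes, peekable(-4) == 10 and
// peekable(2) == 4.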

void WvBufStore::move(void *buf, size_t count)
{
    while (count > 0)
    {
        // fetch the largest contiguous chunk the store can return at once
        size_t amount = optgettable();
        assert(amount != 0 ||
            !"attempted to move() more than used()");
        if (amount > count)
            amount = count;
        const void *data = get(amount);
        memops.uninit_copy(buf, data, amount);
        buf = (unsigned char*)buf + amount;
        count -= amount;
    }
}


void WvBufStore::copy(void *buf, int offset, size_t count)
{
    while (count > 0)
    {
        size_t amount = optpeekable(offset);
        assert(amount != 0 ||
            !"attempted to copy() with invalid offset");
        if (amount > count)
            amount = count;
        const void *data = peek(offset, amount);
        memops.uninit_copy(buf, data, amount);
        buf = (unsigned char*)buf + amount;
        count -= amount;
        offset += amount;
    }
}
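
// Usage sketch (illustrative, assuming `store` is any WvBufStore-derived
// buffer with at least 4 bytes used): move() consumes data like get(), while
// copy() only peeks at an offset and leaves the read position alone.
//
//     unsigned char tmp[4];
//     store.copy(tmp, 0, 4);   // tmp holds the next 4 bytes, used() unchanged
//     store.move(tmp, 4);      // tmp holds those bytes again, used() -= 4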


void WvBufStore::put(const void *data, size_t count)
{
    while (count > 0)
    {
        size_t amount = optallocable();
        assert(amount != 0 ||
            !"attempted to put() more than free()");
        if (amount > count)
            amount = count;
        void *buf = alloc(amount);
        memops.uninit_copy(buf, data, amount);
        data = (const unsigned char*)data + amount;
        count -= amount;
    }
}


void WvBufStore::fastput(const void *data, size_t count)
{
    void *buf = alloc(count);
    memops.uninit_copy(buf, data, count);
}


void WvBufStore::poke(const void *data, int offset, size_t count)
{
    int limit = int(used());
    assert(offset <= limit ||
        !"attempted to poke() beyond end of buffer");
    int end = offset + count;
    if (end >= limit)
    {
        // any portion that extends past the current data must be appended
        size_t tail = end - limit;
        count -= tail;
        put((const unsigned char*)data + count, tail);
    }
    while (count > 0)
    {
        size_t amount = optpeekable(offset);
        assert(amount != 0 ||
            !"attempted to poke() with invalid offset");
        if (amount > count)
            amount = count;
        void *buf = mutablepeek(offset, amount);
        memops.copy(buf, data, amount);
        data = (const unsigned char*)data + amount;
        count -= amount;
        offset += amount;
    }
}
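
// Usage sketch (illustrative, assuming `store` already holds "hello"):
// poke() overwrites in place and grows the buffer if the write runs past the
// end of the used region.
//
//     store.poke("HE", 0, 2);   // buffer now holds "HEllo"
//     store.poke("p!", 4, 2);   // overwrites 'o' and appends '!': "HEllp!"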


void WvBufStore::merge(WvBufStore &instore, size_t count)
{
    if (count == 0)
        return;

    if (usessubbuffers() && instore.usessubbuffers())
    {
        // merge quickly by stealing subbuffers from the other buffer
        for (;;)
        {
            WvBufStore *buf = instore.firstsubbuffer();
            if (! buf)
                break; // strange!

            size_t avail = buf->used();
            if (avail > count)
                break;

            // move the entire buffer
            bool autofree = instore.unlinksubbuffer(buf, false);
            appendsubbuffer(buf, autofree);
            count -= avail;
            if (count == 0)
                return;
        }
    }
    // merge slowly by copying data
    basicmerge(instore, count);
}
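
// Fast-path sketch (illustrative, assuming `bigblock` points at 100000 bytes):
// when both stores use subbuffers, whole chunks are re-linked rather than
// copied; only a trailing partial chunk (if any) falls back to basicmerge().
//
//     WvDynBufStore a(1, 16, 4096), b(1, 16, 4096);
//     b.put(bigblock, 100000);
//     a.merge(b, 100000);   // steals b's chunks; no 100000-byte memcpy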


void WvBufStore::basicmerge(WvBufStore &instore, size_t count)
{
    // move bytes as efficiently as we can using only the public API
    if (count == 0)
        return;
    const void *indata = NULL;
    void *outdata = NULL;
    size_t inavail = 0;
    size_t outavail = 0;
    for (;;)
    {
        if (inavail == 0)
        {
            inavail = instore.optgettable();
            assert(inavail != 0 ||
                !"attempted to merge() more than instore.used()");
            if (inavail > count)
                inavail = count;
            indata = instore.get(inavail);
        }
        if (outavail == 0)
        {
            outavail = optallocable();
            assert(outavail != 0 ||
                !"attempted to merge() more than free()");
            if (outavail > count)
                outavail = count;
            outdata = alloc(outavail);
        }
        if (inavail < outavail)
        {
            memops.uninit_copy(outdata, indata, inavail);
            count -= inavail;
            outavail -= inavail;
            if (count == 0)
            {
                unalloc(outavail);
                return;
            }
            outdata = (unsigned char*)outdata + inavail;
            inavail = 0;
        }
        else
        {
            memops.uninit_copy(outdata, indata, outavail);
            count -= outavail;
            if (count == 0) return;
            inavail -= outavail;
            indata = (const unsigned char*)indata + outavail;
            outavail = 0;
        }
    }
}



/***** WvInPlaceBufStore *****/

WvInPlaceBufStore::WvInPlaceBufStore(int _granularity,
    void *_data, size_t _avail, size_t _size, bool _autofree) :
    WvBufStore(_granularity), data(NULL)
{
    reset(_data, _avail, _size, _autofree);
}


WvInPlaceBufStore::WvInPlaceBufStore(int _granularity, size_t _size) :
    WvBufStore(_granularity), data(NULL)
{
    reset(memops.newarray(_size), 0, _size, true);
}


WvInPlaceBufStore::~WvInPlaceBufStore()
{
    if (data && xautofree)
        memops.deletearray(data);
}


void WvInPlaceBufStore::reset(void *_data, size_t _avail,
    size_t _size, bool _autofree)
{
    assert(_data != NULL || _avail == 0);
    if (data && _data != data && xautofree)
        memops.deletearray(data);
    data = _data;
    xautofree = _autofree;
    xsize = _size;
    setavail(_avail);
}


void WvInPlaceBufStore::setavail(size_t _avail)
{
    assert(_avail <= xsize);
    readidx = 0;
    writeidx = _avail;
}


size_t WvInPlaceBufStore::used() const
{
    return writeidx - readidx;
}


const void *WvInPlaceBufStore::get(size_t count)
{
    assert(count <= writeidx - readidx ||
        !"attempted to get() more than used()");
    const void *tmpptr = (const unsigned char*)data + readidx;
    readidx += count;
    return tmpptr;
}


void WvInPlaceBufStore::unget(size_t count)
{
    assert(count <= readidx ||
        !"attempted to unget() more than ungettable()");
    readidx -= count;
}


size_t WvInPlaceBufStore::ungettable() const
{
    return readidx;
}


void WvInPlaceBufStore::zap()
{
    readidx = writeidx = 0;
}


size_t WvInPlaceBufStore::free() const
{
    return xsize - writeidx;
}


void *WvInPlaceBufStore::alloc(size_t count)
{
    assert(count <= xsize - writeidx ||
        !"attempted to alloc() more than free()");
    void *tmpptr = (unsigned char*)data + writeidx;
    writeidx += count;
    return tmpptr;
}


void WvInPlaceBufStore::unalloc(size_t count)
{
    assert(count <= writeidx - readidx ||
        !"attempted to unalloc() more than unallocable()");
    writeidx -= count;
}


size_t WvInPlaceBufStore::unallocable() const
{
    return writeidx - readidx;
}


void *WvInPlaceBufStore::mutablepeek(int offset, size_t count)
{
    if (count == 0)
        return NULL;
    assert(((offset <= 0) ?
        size_t(-offset) <= readidx :
        size_t(offset) < writeidx - readidx) ||
        !"attempted to peek() with invalid offset or count");
    return (unsigned char*)data + readidx + offset;
}
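
// Usage sketch (illustrative): an in-place store manages one fixed-size
// region; put()/alloc() advance the write index and get() advances the read
// index, so space consumed by get() is not reused until zap() resets both.
//
//     WvInPlaceBufStore store(1 /*granularity*/, 1024 /*size*/);
//     store.put("abc", 3);
//     const char *p = (const char *)store.get(3);   // points into the store
//     store.zap();                                  // whole region reusable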



/***** WvConstInPlaceBufStore *****/

WvConstInPlaceBufStore::WvConstInPlaceBufStore(int _granularity,
    const void *_data, size_t _avail) :
    WvReadOnlyBufferStoreMixin<WvBufStore>(_granularity), data(NULL)
{
    reset(_data, _avail);
}


void WvConstInPlaceBufStore::reset(const void *_data, size_t _avail)
{
    assert(_data != NULL || _avail == 0);
    data = _data;
    setavail(_avail);
}


size_t WvConstInPlaceBufStore::used() const
{
    return avail - readidx;
}


void WvConstInPlaceBufStore::setavail(size_t _avail)
{
    avail = _avail;
    readidx = 0;
}


const void *WvConstInPlaceBufStore::get(size_t count)
{
    assert(count <= avail - readidx ||
        !"attempted to get() more than used()");
    const void *ptr = (const unsigned char*)data + readidx;
    readidx += count;
    return ptr;
}


void WvConstInPlaceBufStore::unget(size_t count)
{
    assert(count <= readidx ||
        !"attempted to unget() more than ungettable()");
    readidx -= count;
}


size_t WvConstInPlaceBufStore::ungettable() const
{
    return readidx;
}


const void *WvConstInPlaceBufStore::peek(int offset, size_t count)
{
    if (count == 0)
        return NULL;
    assert(((offset <= 0) ?
        size_t(-offset) <= readidx :
        size_t(offset) < avail - readidx) ||
        !"attempted to peek() with invalid offset or count");
    return (const unsigned char*)data + readidx + offset;
}


void WvConstInPlaceBufStore::zap()
{
    readidx = avail = 0;
}
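
// Usage sketch (illustrative): a const in-place store gives read-only buffer
// access to memory owned by someone else, such as a string constant.
//
//     static const char greeting[] = "hello";
//     WvConstInPlaceBufStore store(1, greeting, 5);
//     const char *p = (const char *)store.get(5);   // reads straight from greeting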



/***** WvCircularBufStore *****/

WvCircularBufStore::WvCircularBufStore(int _granularity,
    void *_data, size_t _avail, size_t _size, bool _autofree) :
    WvBufStore(_granularity), data(NULL)
{
    reset(_data, _avail, _size, _autofree);
}


WvCircularBufStore::WvCircularBufStore(int _granularity, size_t _size) :
    WvBufStore(_granularity), data(NULL)
{
    reset(memops.newarray(_size), 0, _size, true);
}


WvCircularBufStore::~WvCircularBufStore()
{
    if (data && xautofree)
        memops.deletearray(data);
}


void WvCircularBufStore::reset(void *_data, size_t _avail,
    size_t _size, bool _autofree)
{
    assert(_data != NULL || _avail == 0);
    if (data && _data != data && xautofree)
        memops.deletearray(data);
    data = _data;
    xautofree = _autofree;
    xsize = _size;
    setavail(_avail);
}


void WvCircularBufStore::setavail(size_t _avail)
{
    assert(_avail <= xsize);
    head = 0;
    totalused = totalinit = _avail;
}


size_t WvCircularBufStore::used() const
{
    return totalused;
}


size_t WvCircularBufStore::optgettable() const
{
    size_t avail = xsize - head;
    if (avail > totalused)
        avail = totalused;
    return avail;
}


const void *WvCircularBufStore::get(size_t count)
{
    assert(count <= totalused ||
        !"attempted to get() more than used()");
    size_t first = ensurecontiguous(0, count, false /*keephistory*/);
    const void *tmpptr = (const unsigned char*)data + first;
    head = (head + count) % xsize;
    totalused -= count;
    return tmpptr;
}


void WvCircularBufStore::unget(size_t count)
{
    assert(count <= totalinit - totalused ||
        !"attempted to unget() more than ungettable()");
    head = (head + xsize - count) % xsize;
    totalused += count;
}


size_t WvCircularBufStore::ungettable() const
{
    return totalinit - totalused;
}


void WvCircularBufStore::zap()
{
    head = 0;
    totalused = totalinit = 0;
}


size_t WvCircularBufStore::free() const
{
    return xsize - totalused;
}


size_t WvCircularBufStore::optallocable() const
{
    size_t tail = head + totalused;
    if (tail >= xsize)
        return xsize - totalused;
    return xsize - tail;
}


void *WvCircularBufStore::alloc(size_t count)
{
    assert(count <= xsize - totalused ||
        !"attempted to alloc() more than free()");
    totalinit = totalused; // always discard history
    size_t first = ensurecontiguous(totalused, count,
        false /*keephistory*/);
    void *tmpptr = (unsigned char*)data + first;
    totalused += count;
    totalinit += count;
    return tmpptr;
}


void WvCircularBufStore::unalloc(size_t count)
{
    assert(count <= totalused ||
        !"attempted to unalloc() more than unallocable()");
    totalused -= count;
    totalinit -= count;
}


size_t WvCircularBufStore::unallocable() const
{
    return totalused;
}


void *WvCircularBufStore::mutablepeek(int offset, size_t count)
{
    if (count == 0)
        return NULL;
    assert(((offset <= 0) ?
        size_t(-offset) <= totalinit - totalused :
        size_t(offset) < totalused) ||
        !"attempted to peek() with invalid offset or count");
    size_t first = ensurecontiguous(offset, count,
        true /*keephistory*/);
    void *tmpptr = (unsigned char*)data + first;
    return tmpptr;
}


void WvCircularBufStore::normalize()
{
    // discard history to minimize data transfers
    totalinit = totalused;

    // normalize the buffer
    compact(data, xsize, head, totalused);
    head = 0;
}


size_t WvCircularBufStore::ensurecontiguous(int offset,
    size_t count, bool keephistory)
{
    // determine the region of interest
    size_t start = (head + offset + xsize) % xsize;
    if (count != 0)
    {
        size_t end = start + count;
        if (end > xsize)
        {
            // the region is not entirely contiguous
            // determine the region that must be normalized
            size_t keepstart = head;
            if (keephistory)
            {
                // adjust the region to include history
                keepstart += totalused - totalinit + xsize;
            }
            else
            {
                // discard history to minimize data transfers
                totalinit = totalused;
            }
            keepstart %= xsize;

            // normalize the buffer over this region
            compact(data, xsize, keepstart, totalinit);
            head = totalinit - totalused;

            // compute the new start offset
            start = (head + offset + xsize) % xsize;
        }
    }
    return start;
}


void WvCircularBufStore::compact(void *data, size_t size,
    size_t head, size_t count)
{
    if (count == 0)
    {
        // Case 1: Empty region
        // Requires 0 moves
        return;
    }

    if (head + count <= size)
    {
        // Case 2: Contiguous region
        // Requires count moves
        memops.uninit_move(data, (unsigned char*)data + head, count);
        return;
    }

    size_t headcount = size - head;
    size_t tailcount = count - headcount;
    size_t freecount = size - count;
    if (freecount >= headcount)
    {
        // Case 3: Non-contiguous region, does not require swapping
        // Requires count moves
        memops.uninit_move((unsigned char*)data + headcount,
            data, tailcount);
        memops.uninit_move(data, (unsigned char*)data + head,
            headcount);
        return;
    }

    // Case 4: Non-contiguous region, requires swapping
    // Requires count * 2 moves
    unsigned char *start = (unsigned char*)data;
    unsigned char *end = (unsigned char*)data + head;
    while (tailcount >= headcount)
    {
        memops.swap(start, end, headcount);
        start += headcount;
        tailcount -= headcount;
    }
    // Now the array looks like: |a|b|c|g|h|_|d|e|f|
    // FIXME: this is an interim solution
    void *buf = memops.newarray(tailcount);
    memops.uninit_move(buf, start, tailcount);
    memops.uninit_move(start, end, headcount);
    memops.uninit_move(start + headcount, buf, tailcount);
    memops.deletearray(buf);
}
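
// Worked example (illustrative): with size == 9, head == 6 and count == 6,
// the region wraps: |d|e|f|_|_|_|a|b|c| (head bytes a,b,c; tail bytes d,e,f).
// Then headcount == 3, tailcount == 3 and freecount == 3, so Case 3 applies
// and two uninit_move() calls produce |a|b|c|d|e|f|_|_|_| with the data
// contiguous at offset 0.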



/***** WvLinkedBufferStore *****/

WvLinkedBufferStore::WvLinkedBufferStore(int _granularity) :
    WvBufStore(_granularity), totalused(0), maxungettable(0)
{
}


bool WvLinkedBufferStore::usessubbuffers() const
{
    return true;
}


size_t WvLinkedBufferStore::numsubbuffers() const
{
    return list.count();
}


WvBufStore *WvLinkedBufferStore::firstsubbuffer() const
{
    return list.first();
}


void WvLinkedBufferStore::appendsubbuffer(WvBufStore *buffer,
    bool autofree)
{
    list.append(buffer, autofree);
    totalused += buffer->used();
}


void WvLinkedBufferStore::prependsubbuffer(WvBufStore *buffer,
    bool autofree)
{
    list.prepend(buffer, autofree);
    totalused += buffer->used();
    maxungettable = 0;
}


bool WvLinkedBufferStore::unlinksubbuffer(WvBufStore *buffer,
    bool allowautofree)
{
    WvBufStoreList::Iter it(list);
    WvLink *link = it.find(buffer);
    assert(link);

    bool autofree = it.get_autofree();
    totalused -= buffer->used();
    if (buffer == list.first())
        maxungettable = 0;
    if (! allowautofree)
        it.set_autofree(false);
    it.unlink(); // do not recycle the buffer
    return autofree;
}


size_t WvLinkedBufferStore::used() const
{
    assert(!totalused || !list.isempty());
    return totalused;
}


size_t WvLinkedBufferStore::optgettable() const
{
    // find the first buffer with an optgettable() and return that
    size_t count;
    WvBufStoreList::Iter it(list);
    for (it.rewind(); it.next(); )
        if ((count = it->optgettable()) != 0)
            return count;
    return 0;
}


const void *WvLinkedBufferStore::get(size_t count)
{
    assert(!totalused || !list.isempty());
    if (count == 0)
        return NULL;

    assert(count <= totalused);
    assert(count > 0);

    totalused -= count;

    assert(totalused >= 0);

    // search for first non-empty buffer
    WvBufStore *buf;
    size_t availused;
    WvBufStoreList::Iter it(list);
    for (;;)
    {
        it.rewind(); it.next();
        buf = it.ptr();
        assert(buf && "attempted to get() more than used()" &&
            "totalused is wrong!");

        availused = buf->used();
        if (availused != 0)
            break;

        // unlink the leading empty buffer
        do_xunlink(it);
    }

    // return the data
    if (availused < count)
        buf = coalesce(it, count);

    maxungettable += count;
    return buf->get(count);
}


void WvLinkedBufferStore::unget(size_t count)
{
    assert(!totalused || !list.isempty());
    if (count == 0)
        return;
    assert(count > 0);
    assert(!list.isempty());
    assert(count <= maxungettable);
    totalused += count;
    maxungettable -= count;
    list.first()->unget(count);
}


size_t WvLinkedBufferStore::ungettable() const
{
    assert(!totalused || !list.isempty());
    if (list.isempty())
    {
        assert(maxungettable == 0);
        return 0;
    }

    // maxungettable and list.first()->ungettable() can get out of sync
    // in two ways:
    //  - coalescing moves data from later buffers to the first one, which
    //    leaves it as ungettable in those buffers.  So when we first start
    //    to use a buffer, its ungettable() count may be too high.  (This is
    //    the reason maxungettable exists.)
    //  - some calls (ie. alloc) may clear all ungettable data from the
    //    first buffer without telling us.  So there might be less data to
    //    unget than we think.
    size_t avail = list.first()->ungettable();
    if (avail > maxungettable)
        avail = maxungettable;
    return avail;
}


void WvLinkedBufferStore::zap()
{
    totalused = 0;
    maxungettable = 0;
    WvBufStoreList::Iter it(list);
    for (it.rewind(); it.next(); )
        do_xunlink(it);
}


size_t WvLinkedBufferStore::free() const
{
    if (!list.isempty())
        return list.last()->free();
    return 0;
}


size_t WvLinkedBufferStore::optallocable() const
{
    if (!list.isempty())
        return list.last()->optallocable();
    return 0;
}


void *WvLinkedBufferStore::alloc(size_t count)
{
    if (count == 0)
        return NULL;
    assert(!list.isempty() && "attempted to alloc() more than free()");
    totalused += count;
    return list.last()->alloc(count);
}


void WvLinkedBufferStore::unalloc(size_t count)
{
    assert(count <= totalused);

    totalused -= count;
    while (count > 0)
    {
        assert(!list.isempty() &&
            "attempted to unalloc() more than unallocable()" &&
            "totalused is wrong");
        WvBufStore *buf = list.last();
        size_t avail = buf->unallocable();
        if (count < avail)
        {
            buf->unalloc(count);
            break;
        }

        WvBufStoreList::Iter it(list);
        it.find(buf);
        do_xunlink(it);

        count -= avail;
    }
}


size_t WvLinkedBufferStore::unallocable() const
{
    return totalused;
}


size_t WvLinkedBufferStore::optpeekable(int offset) const
{
    // search for the buffer that contains the offset
    WvBufStoreList::Iter it(list);
    int newoffset = search(it, offset);
    WvBufStore *buf = it.ptr();
    if (!buf)
        return 0; // out of bounds
    return buf->optpeekable(newoffset);
}


void *WvLinkedBufferStore::mutablepeek(int offset, size_t count)
{
    if (count == 0)
        return NULL;

    // search for the buffer that contains the offset
    WvBufStoreList::Iter it(list);
    offset = search(it, offset);
    WvBufStore *buf = it.ptr();
    assert(buf && "attempted to peek() with invalid offset or count");

    // return data if we have enough
    size_t availpeek = buf->peekable(offset);
    if (availpeek < count)
        buf = coalesce(it, count);
    return buf->mutablepeek(offset, count);
}


WvBufStore *WvLinkedBufferStore::newbuffer(size_t minsize)
{
    minsize = roundup(minsize, granularity);
    //return new WvInPlaceBufStore(granularity, minsize);
    return new WvCircularBufStore(granularity, minsize);
}


void WvLinkedBufferStore::recyclebuffer(WvBufStore *buffer)
{
    delete buffer;
}


int WvLinkedBufferStore::search(WvBufStoreList::Iter &it,
    int offset) const
{
    it.rewind();
    if (it.next())
    {
        if (offset < 0)
        {
            // inside unget() region
            WvBufStore *buf = it.ptr();
            if (size_t(-offset) <= buf->ungettable())
                return offset;
            it.rewind(); // mark out of bounds
        }
        else
        {
            // inside get() region
            do
            {
                WvBufStore *buf = it.ptr();
                size_t avail = buf->used();
                if (size_t(offset) < avail)
                    return offset;
                offset -= avail;
            }
            while (it.next());
        }
    }
    return 0;
}


WvBufStore *WvLinkedBufferStore::coalesce(WvBufStoreList::Iter &it,
    size_t count)
{
    WvBufStore *buf = it.ptr();
    size_t availused = buf->used();
    if (count <= availused)
        return buf;

    // allocate a new buffer if there is not enough room to coalesce
    size_t needed = count - availused;
    size_t availfree = buf->free();
    size_t mustskip = 0;
    if (availfree < needed)
    {
        // if this is the first buffer, then we need to unget as
        // much as possible to ensure it does not get discarded
        // during the coalescing phase
        if (buf == list.first() && totalused != 0)
        {
            // use ungettable() instead of buf->ungettable() because we
            // might have reset it to 0
            // FIXME: uh... who might have reset it to 0, and why?
            mustskip = ungettable();
            buf->unget(mustskip);
        }

        needed = count + mustskip;
        buf = newbuffer(needed);

        // insert the buffer before the previous link
        list.add_after(it.prev, buf, true);
        it.find(buf);
    }

    // coalesce subsequent buffers into the first
    while (it.next())
    {
        WvBufStore *itbuf = it.ptr();
        size_t chunk = itbuf->used();
        if (chunk > 0)
        {
            if (chunk > needed)
                chunk = needed;
            buf->merge(*itbuf, chunk);
            needed -= chunk;
            if (needed == 0)
            {
                buf->skip(mustskip);
                return buf;
            }
        }
        do_xunlink(it); // buffer is now empty
    }
    assert(false && "invalid count during get() or peek()");
    return NULL;
}


void WvLinkedBufferStore::do_xunlink(WvBufStoreList::Iter &it)
{
    WvBufStore *buf = it.ptr();
    if (buf == list.first())
        maxungettable = 0;

    bool autofree = it.get_autofree();
    it.set_autofree(false);
    it.xunlink();
    if (autofree)
        recyclebuffer(buf);
}



/***** WvDynBufStore *****/

WvDynBufStore::WvDynBufStore(size_t _granularity,
    size_t _minalloc, size_t _maxalloc) :
    WvLinkedBufferStore(_granularity),
    minalloc(_minalloc), maxalloc(_maxalloc)
{
    assert(maxalloc >= minalloc);
}


size_t WvDynBufStore::free() const
{
    return UNLIMITED_FREE_SPACE;
}


size_t WvDynBufStore::optallocable() const
{
    size_t avail = WvLinkedBufferStore::optallocable();
    if (avail == 0)
        avail = UNLIMITED_FREE_SPACE;
    return avail;
}


void *WvDynBufStore::alloc(size_t count)
{
    if (count > WvLinkedBufferStore::free())
    {
        WvBufStore *buf = newbuffer(count);
        appendsubbuffer(buf, true);
    }
    return WvLinkedBufferStore::alloc(count);
}


WvBufStore *WvDynBufStore::newbuffer(size_t minsize)
{
    // allocate a new buffer
    // try to approximate exponential growth by at least doubling
    // the amount of space available for immediate use
    size_t size = used();
    if (size < minsize * 2)
        size = minsize * 2;
    if (size < minalloc)
        size = minalloc;
    else if (size > maxalloc)
        size = maxalloc;
    if (size < minsize)
        size = minsize;
    return WvLinkedBufferStore::newbuffer(size);
}
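
// Growth sketch (illustrative): with minalloc == 16 and maxalloc == 4096, an
// empty store asked for 10 bytes gets a 20-byte chunk (double the request);
// once ~10000 bytes are already buffered, the next chunk is clamped to the
// 4096-byte maxalloc rather than doubling forever.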



/***** WvNullBufStore *****/

WvNullBufStore::WvNullBufStore(size_t _granularity) :
    WvWriteOnlyBufferStoreMixin<
        WvReadOnlyBufferStoreMixin<WvBufStore> >(_granularity)
{
}



/***** WvBufCursorStore *****/

WvBufCursorStore::WvBufCursorStore(size_t _granularity,
    WvBufStore *_buf, int _start, size_t _length) :
    WvReadOnlyBufferStoreMixin<WvBufStore>(_granularity),
    buf(_buf), start(_start), length(_length), shift(0)
{
}


bool WvBufCursorStore::isreadable() const
{
    return buf->isreadable();
}


size_t WvBufCursorStore::used() const
{
    return length - shift;
}


size_t WvBufCursorStore::optgettable() const
{
    size_t avail = buf->optpeekable(start + shift);
    assert(avail != 0 || length == shift ||
        !"buffer cursor operating over invalid region");
    if (avail > length)
        avail = length;
    return avail;
}


const void *WvBufCursorStore::get(size_t count)
{
    assert(count <= length - shift ||
        !"attempted to get() more than used()");
    const void *data = buf->peek(start + shift, count);
    shift += count;
    return data;
}


void WvBufCursorStore::skip(size_t count)
{
    assert(count <= length - shift ||
        !"attempted to skip() more than used()");
    shift += count;
}


void WvBufCursorStore::unget(size_t count)
{
    assert(count <= shift ||
        !"attempted to unget() more than ungettable()");
    shift -= count;
}


size_t WvBufCursorStore::ungettable() const
{
    return shift;
}


void WvBufCursorStore::zap()
{
    shift = length;
}


size_t WvBufCursorStore::peekable(int offset) const
{
    offset += shift;
    offset -= start;
    if (offset < 0 || offset > int(length))
        return 0;
    return length - size_t(offset);
}


size_t WvBufCursorStore::optpeekable(int offset) const
{
    size_t avail = buf->optpeekable(start + shift + offset);
    assert(avail != 0 || length == shift ||
        !"buffer cursor operating over invalid region");
    size_t max = peekable(offset);
    if (avail > max)
        avail = max;
    return avail;
}


const void *WvBufCursorStore::peek(int offset, size_t count)
{
    offset += shift;
    assert((offset >= start && offset - start + count <= length) ||
        !"attempted to peek() with invalid offset or count");
    return buf->peek(offset, count);
}


bool WvBufCursorStore::iswritable() const
{
    // check if mutablepeek() is supported
    return buf->iswritable();
}


void *WvBufCursorStore::mutablepeek(int offset, size_t count)
{
    offset += shift;
    assert((offset >= start && offset - start + count <= length) ||
        !"attempted to peek() with invalid offset or count");
    return buf->mutablepeek(offset, count);
}
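
// Usage sketch (illustrative, assuming `inner` is a WvBufStore with at least
// 10 bytes used): a cursor store exposes a window over another store without
// copying or consuming the underlying data.
//
//     WvBufCursorStore view(1, &inner, 2 /*start*/, 5 /*length*/);
//     view.get(5);     // reads bytes 2..6 of inner's unread data via peek()
//     view.unget(5);   // rewinds only the cursor; inner is untouched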