block-flow-test.cpp
/*
  BlockFlow(Test)  -  verify scheduler memory management scheme

  Copyright (C)  Lumiera.org
    2023,        Hermann Vosseler <Ichthyostega@web.de>

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of
  the License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

* *****************************************************/

#include "lib/test/run.hpp"
#include "lib/test/test-helper.hpp"
#include "lib/time/timevalue.hpp"
#include "lib/meta/function.hpp"
#include "lib/format-cout.hpp"
#include "lib/util.hpp"

#include <chrono>
#include <vector>
#include <tuple>
#include <utility>
#include <functional>
#include <memory>

using test::Test;
using util::isSameObject;
using lib::time::Offset;

using std::reference_wrapper;
using std::vector;
using std::pair;


namespace vault{
namespace gear {
namespace test {

  namespace { // shorthand for test parametrisation

    using Extent = BlockFlow::Extent;
    using Epoch  = BlockFlow::Epoch;

    const size_t EXTENT_SIZ      = Extent::SIZ();
    Duration INITIAL_EPOCH_STEP  = Strategy{}.initialEpochStep();
    const size_t AVERAGE_EPOCHS  = Strategy{}.averageEpochs();
    const double BOOST_OVERFLOW  = Strategy{}.boostFactorOverflow();
    const double TARGET_FILL     = Strategy{}.config().TARGET_FILL;
    const double ACTIVITIES_P_FR = Strategy{}.config().ACTIVITIES_PER_FRAME;
  }




  /*****************************************************************/
  class BlockFlow_test : public Test
    {

      virtual void
      run (Arg)
        {
          simpleUsage();
          handleEpoch();
          placeActivity();
          adjustEpochs();
          announceLoad();
          storageFlow();
        }


      void
      simpleUsage()
        {
          BlockFlow bFlow;
          Time deadline = randTime();

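          // ask the BlockFlow allocator for a new Activity record to be processed until the
          // given deadline; create() without arguments default-constructs it, yielding a TICK verb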
          Activity& tick = bFlow.until(deadline).create();
          CHECK (tick.verb_ == Activity::TICK);
          CHECK (1 == watch(bFlow).cntElm());
          CHECK (1 == watch(bFlow).cntEpochs());
          CHECK (watch(bFlow).first() > deadline);
          CHECK (watch(bFlow).first() - deadline == bFlow.getEpochStep());

          bFlow.discardBefore (deadline + Time{0,5});
          CHECK (0 == watch(bFlow).cntEpochs());
          CHECK (0 == watch(bFlow).cntElm());
        }



      void
      handleEpoch()
        {
          Allocator alloc;
          alloc.openNew();

          // the raw storage Extent is a compact block
          // providing uninitialised storage typed as `vault::gear::Activity`
          Extent& extent = *alloc.begin();
          CHECK (extent.size() == Extent::SIZ::value);
          CHECK (sizeof(extent) == extent.size() * sizeof(Activity));
          CHECK (showType<Extent::value_type>() == "vault::gear::Activity"_expect);

          // we can just access some slot and place data there
          extent[55].data_.feed.one = 555555555555555;

          // now establish an Epoch placed into this storage block:
          Epoch& epoch = Epoch::setup (alloc.begin(), Time{0,10});

          // the underlying storage is not touched yet...
          CHECK (epoch[55].data_.feed.one == 555555555555555);

          // but in the first slot, an »EpochGate« has been implanted
          Epoch::EpochGate& gate = epoch.gate();
          CHECK (isSameObject (gate, epoch[0]));
          CHECK (isSameObject (epoch[0], extent[0]));
          CHECK (Time{gate.deadline()} == Time(0,10));
          CHECK (Time{gate.deadline()} == Time{epoch[0].data_.condition.dead});
          CHECK (epoch[0].is (Activity::GATE));

          // the gate's `next`-pointer is (ab)used to manage the next allocation slot
          CHECK (isSameObject (*gate.next, epoch[extent.size()-1]));
          CHECK (0 == gate.filledSlots());
          CHECK (0 == epoch.getFillFactor());

          // the storage there is not used yet....
          epoch[extent.size()-1].data_.timing.instant = Time{5,5};
          // ....but will be overwritten by the following ctor call

          // allocate a new Activity into the next free slot (using a faked AllocatorHandle)
          BlockFlow::AllocatorHandle allocHandle{alloc.begin(), nullptr};
          Activity& timeStart = allocHandle.create (Activity::WORKSTART);
          CHECK (isSameObject (timeStart, epoch[extent.size()-1]));

          // this Activity object is properly initialised (and memory was altered)
          CHECK (epoch[extent.size()-1].data_.timing.instant != Time(5,5));
          CHECK (epoch[extent.size()-1].data_.timing.instant == Time::NEVER);
          CHECK (timeStart.verb_ == Activity::WORKSTART);
          CHECK (timeStart.data_.timing.instant == Time::NEVER);
          CHECK (timeStart.data_.timing.quality == 0);

          // and the free-pointer was decremented to point to the next free slot
          CHECK (isSameObject (*gate.next, epoch[extent.size()-2]));

          // which also implies that there is still ample space left...
          CHECK (1 == gate.filledSlots());
          CHECK (gate.hasFreeSlot());

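          // Note: the fill factor relates the occupied slots to EXTENT_SIZ-1,
          //       since the first slot is always taken up by the EpochGate itself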
          CHECK (epoch.getFillFactor() == double(gate.filledSlots()) / (EXTENT_SIZ-1));

          // so let's eat this space up...
          for (uint i=extent.size()-2; i>1; --i)
            gate.claimNextSlot();

          // one final slot is left (beyond the EpochGate itself)
          CHECK (isSameObject (*gate.next, epoch[1]));
          CHECK (gate.filledSlots() == EXTENT_SIZ-2);
          CHECK (gate.hasFreeSlot());

          gate.claimNextSlot();
          // aaand the boat is full...
          CHECK (not gate.hasFreeSlot());
          CHECK (isSameObject (*gate.next, epoch[0]));
          CHECK (gate.filledSlots() == EXTENT_SIZ-1);
          CHECK (epoch.getFillFactor() == 1);

          // a given Epoch can be checked for relevance against a deadline
          CHECK (gate.deadline() == Time(0,10));

          CHECK (    gate.isAlive (Time(0,5)));
          CHECK (    gate.isAlive (Time(999,9)));
          CHECK (not gate.isAlive (Time(0,10)));
          CHECK (not gate.isAlive (Time(1,10)));
        }



      void
      placeActivity()
        {
          BlockFlow bFlow;

          Time t1 = Time{  0,10};
          Time t2 = Time{500,10};
          Time t3 = Time{  0,11};

          // no Epoch established yet...
          auto& a1 = bFlow.until(t1).create();
          CHECK (watch(bFlow).allEpochs() == "10s200ms"_expect);
          CHECK (watch(bFlow).find(a1) == "10s200ms"_expect);
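          // ⟿ a first Epoch was established, ending one Epoch step (initially 200ms) past the requested deadline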

          // setup Epoch grid into the future
          auto& a3 = bFlow.until(t3).create();
          CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect);
          CHECK (watch(bFlow).find(a3) == "11s"_expect);

          // associate to existing Epoch
          auto& a2 = bFlow.until(t2).create();
          CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect);
          CHECK (watch(bFlow).find(a2) == "10s600ms"_expect);

          Time t0 = Time{0,5};
          // a late (past) Activity is placed into the oldest Epoch alive
          auto& a0 = bFlow.until(t0).create();
          CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect);
          CHECK (watch(bFlow).find(a0) == "10s200ms"_expect);

          // provoke Epoch overflow by exhausting all available storage slots
          BlockFlow::AllocatorHandle allocHandle = bFlow.until(Time{300,10});
          for (uint i=1; i<EXTENT_SIZ; ++i)
            allocHandle.create();

          CHECK (allocHandle.currDeadline() == Time(400,10));
          CHECK (not allocHandle.hasFreeSlot());

          // ...causing the next allocation to be shifted into the subsequent Epoch
          auto& a4 = allocHandle.create();
          CHECK (allocHandle.currDeadline() == Time(600,10));
          CHECK (allocHandle.hasFreeSlot());
          CHECK (watch(bFlow).find(a4) == "10s600ms"_expect);

          // fill up and exhaust this Epoch too....
          for (uint i=1; i<EXTENT_SIZ; ++i)
            allocHandle.create();

          // so the handle has moved on to the after-next Epoch
          CHECK (allocHandle.currDeadline() == Time(800,10));
          CHECK (allocHandle.hasFreeSlot());

          // even an allocation with a way earlier deadline is shifted here now
          auto& a5 = bFlow.until(Time{220,10}).create();
          CHECK (watch(bFlow).find(a5) == "10s800ms"_expect);

          // now repeat the same pattern, but this time towards uncharted Epochs
          allocHandle = bFlow.until(Time{900,10});
          for (uint i=2; i<EXTENT_SIZ; ++i)
            allocHandle.create();

          CHECK (allocHandle.currDeadline() == Time(0,11));
          CHECK (not allocHandle.hasFreeSlot());
          auto& a6 = bFlow.until(Time{850,10}).create();
          // Note: we encountered four overflow events, leading to decreased Epoch spacing for new Epochs
          CHECK (watch(bFlow).find(a6) == "11s192ms"_expect);
          CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s|11s192ms"_expect);

          auto& a7 = bFlow.until(Time{500,11}).create();
          // this allocation does not count as overflow, but has to expand the Epoch grid, now using the reduced Epoch spacing
          CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s|11s192ms|11s384ms|11s576ms"_expect);
          CHECK (watch(bFlow).find(a7) == "11s576ms"_expect);

          // we created 8 elements (a0...a7) and caused three Epochs to overflow...
          CHECK (watch(bFlow).cntElm() == 8 + EXTENT_SIZ-1 + EXTENT_SIZ-1 + EXTENT_SIZ-2);

          // on clean-up, the actual fill ratio is used to optimise the Epoch length for better space usage
          CHECK (bFlow.getEpochStep() == "≺192ms≻"_expect);
          bFlow.discardBefore (Time{999,10});
          CHECK (bFlow.getEpochStep() == "≺218ms≻"_expect);
          CHECK (watch(bFlow).allEpochs() == "11s|11s192ms|11s384ms|11s576ms"_expect);

          // placed into the oldest Epoch still alive
          auto& a8 = bFlow.until(Time{500,10}).create();
          CHECK (watch(bFlow).find(a8) == "11s192ms"_expect);
        }



      void
      adjustEpochs()
        {
          BlockFlow bFlow;
          CHECK (bFlow.getEpochStep() == INITIAL_EPOCH_STEP);

          // whenever an Epoch overflow happens, capacity is boosted by reducing the Epoch duration
          bFlow.markEpochOverflow();
          CHECK (bFlow.getEpochStep() == INITIAL_EPOCH_STEP * BOOST_OVERFLOW);
          bFlow.markEpochOverflow();
          CHECK (bFlow.getEpochStep() == INITIAL_EPOCH_STEP * BOOST_OVERFLOW*BOOST_OVERFLOW);

          // To counteract this capacity boost, on clean-up the actual fill rate of the Extent
          // serves to guess an optimal Epoch duration, which is averaged exponentially.

          // Using just arbitrary demo values for some fictional Epochs
          TimeVar dur1 = INITIAL_EPOCH_STEP;
          double fac1 = 0.8;
          TimeVar dur2 = INITIAL_EPOCH_STEP * BOOST_OVERFLOW;
          double fac2 = 0.3;

          double goal1 = double(_raw(dur1)) / (fac1/TARGET_FILL);
          double goal2 = double(_raw(dur2)) / (fac2/TARGET_FILL);

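          // replicate the regulation applied by markEpochUnderflow(): the raw target step is the
          // observed Epoch duration, scaled by TARGET_FILL relative to the actual fill factor,
          // then smoothed by an exponential moving average over AVERAGE_EPOCHS contributions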
          auto movingAverage = [&](TimeValue old, double contribution)
                                  {
                                    auto N = AVERAGE_EPOCHS;
                                    auto averageTicks = double(_raw(old))*(N-1)/N + contribution/N;
                                    return TimeValue{gavl_time_t (floor (averageTicks))};
                                  };

          TimeVar step = bFlow.getEpochStep();
          bFlow.markEpochUnderflow (dur1, fac1);
          CHECK (bFlow.getEpochStep() == movingAverage(step, goal1));

          step = bFlow.getEpochStep();
          bFlow.markEpochUnderflow (dur2, fac2);
          CHECK (bFlow.getEpochStep() == movingAverage(step, goal2));
        }



      void
      announceLoad()
        {
          BlockFlow bFlow;

          Duration initialStep{bFlow.getEpochStep()};
          size_t initialFPS = Strategy{}.initialFrameRate();

          // signal that the load will be doubled
          bFlow.announceAdditionalFlow (FrameRate(initialFPS));
          CHECK (bFlow.getEpochStep() * 2 == initialStep);

          // signal that the load will again be doubled
          bFlow.announceAdditionalFlow (FrameRate(2*initialFPS));
          CHECK (bFlow.getEpochStep() * 4 == initialStep);
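
          // ⟿ the Epoch step is scaled down inversely proportional to the overall announced frame rate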
        }




      void
      storageFlow()
        {
          const size_t FPS        = 200;
          const size_t TICK_P_S   = FPS * ACTIVITIES_P_FR;    // simulated throughput: 200 frames per second
          const gavl_time_t STP   = Time::SCALE / TICK_P_S;   // simulation stepping (here 2 steps per ms)
          const gavl_time_t RUN   = _raw(Time{0,0,3});        // nominal length of the simulation time axis
          Offset BASE_DEADLINE{FSecs{1,2}};                   // base pre-roll before deadline
          Offset SPREAD_DEAD{FSecs{2,100}};                   // random spread of deadline around base
          const uint INVOKE_LAG   = _raw(Time{250,0}) /STP;   // „invoke“ the Activity after simulated 250ms (≙ 500 steps)
          const uint CLEAN_UP     = _raw(Time{100,0}) /STP;   // perform clean-up every 200 steps
          const uint INSTANCES    = RUN /STP;                 // number of Activity records to send through the test subject
          const uint MAX_TIME     = INSTANCES
                                   +INVOKE_LAG+2*CLEAN_UP;    // overall count of test steps to perform

          using TestData = vector<pair<TimeVar, size_t>>;
          using Subjects = vector<reference_wrapper<Activity>>;

          // pre-generate random test data
          TestData testData{INSTANCES};
          for (size_t i=0; i<INSTANCES; ++i)
            {
              const size_t SPREAD   = 2*_raw(SPREAD_DEAD);
              const size_t MIN_DEAD = _raw(BASE_DEADLINE) - _raw(SPREAD_DEAD);

              auto& [t,r] = testData[i];
              r = rand() % SPREAD;
              t = TimeValue(i*STP + MIN_DEAD + r);
            }
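          // ⟿ each record now holds a deadline of i·STP + BASE_DEADLINE ± SPREAD_DEAD,
          //    plus the random offset r, which also serves as payload for the checksum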

          Activity dummy;  // reserve memory for test subject index
          Subjects subject{INSTANCES, std::ref(dummy)};

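          // the test driver below interleaves allocation and disposal: at step i a new Activity with
          // deadline testData[i] is allocated, while the Activity allocated INVOKE_LAG steps earlier
          // is accessed and disposed, mimicking continuous flow through the Scheduler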
          auto runTest = [&](auto allocate, auto invoke) -> size_t
            {
              // allocate an Activity record for the given deadline, with the given random payload
              ASSERT_VALID_SIGNATURE (decltype(allocate), Activity&(Time, size_t));

              // access the given Activity, read the payload, then trigger disposal
              ASSERT_VALID_SIGNATURE (decltype(invoke), size_t(Activity&));

              size_t checksum{0};
              for (size_t i=0; i<MAX_TIME; ++i)
                {
                  if (i < INSTANCES)
                    {
                      auto const& data = testData[i];
                      subject[i] = allocate(data.first, data.second);
                    }
                  if (INVOKE_LAG <= i and i-INVOKE_LAG < INSTANCES)
                    checksum += invoke(subject[i-INVOKE_LAG]);
                }
              return checksum;
            };

          auto benchmark = [INSTANCES](auto invokeTest)
            {  // does the timing measurement, with result in µ-seconds
              return lib::test::benchmarkTime(invokeTest, INSTANCES);
            };



          /* =========== Test-Setup-1: no individual allocations/deallocations ========== */
          size_t sum1{0};
          vector<Activity> storage{INSTANCES};
          auto noAlloc = [&]{  // use pre-allocated storage block
                auto allocate = [i=0, &storage](Time, size_t check) mutable -> Activity&
                                  {
                                    return *new(&storage[i++]) Activity{check, size_t{55}};
                                  };
                auto invoke = [](Activity& feedActivity)
                                  {
                                    return feedActivity.data_.feed.one;
                                  };

                sum1 = runTest (allocate, invoke);
              };


          /* =========== Test-Setup-2: individual heap allocations ========== */
          size_t sum2{0};
          auto heapAlloc = [&]{
                auto allocate = [](Time, size_t check) mutable -> Activity&
                                  {
                                    return *new Activity{check, size_t{55}};
                                  };
                auto invoke = [](Activity& feedActivity)
                                  {
                                    size_t check = feedActivity.data_.feed.one;
                                    delete &feedActivity;
                                    return check;
                                  };

                sum2 = runTest (allocate, invoke);
              };


          /* =========== Test-Setup-3: manage individually by ref-cnt ========== */
          size_t sum3{0};
          vector<std::shared_ptr<Activity>> manager{INSTANCES};
          auto sharedAlloc = [&]{
                auto allocate = [&, i=0](Time, size_t check) mutable -> Activity&
                                  {
                                    Activity* a = new Activity{check, size_t{55}};
                                    manager[i].reset(a);
                                    ++i;
                                    return *a;
                                  };
                auto invoke = [&, i=0](Activity& feedActivity) mutable
                                  {
                                    size_t check = feedActivity.data_.feed.one;
                                    manager[i].reset();  // drop the ref-count, thereby deleting the Activity
                                    ++i;                 // advance to keep the manager index in sync with the invocation sequence
                                    return check;
                                  };

                sum3 = runTest (allocate, invoke);
              };


          /* =========== Test-Setup-4: use BlockFlow allocation scheme ========== */

          size_t sum4{0};
          // Note: using the RenderConfig, which uses larger blocks and more pre-allocation
          auto blockFlowAlloc = [&]{
                auto allocHandle = blockFlow.until(Time{BASE_DEADLINE});
                auto allocate = [&, j=0](Time t, size_t check) mutable -> Activity&
                                  {
                                    if (++j >= 10)  // typically several Activities are allocated on the same deadline
                                      {
                                        allocHandle = blockFlow.until(t);
                                        j = 0;
                                      }
                                    return allocHandle.create (check, size_t{55});
                                  };
                auto invoke = [&, i=0](Activity& feedActivity) mutable
                                  {
                                    size_t check = feedActivity.data_.feed.one;
                                    if (i % CLEAN_UP == 0)
                                      blockFlow.discardBefore (Time{TimeValue{i*STP}});
                                    ++i;
                                    return check;
                                  };

                sum4 = runTest (allocate, invoke);
              };

          // INVOKE Setup-1
          auto time_noAlloc = benchmark(noAlloc);

          // INVOKE Setup-2
          auto time_heapAlloc = benchmark(heapAlloc);

          // INVOKE Setup-3
          auto time_sharedAlloc = benchmark(sharedAlloc);

          cout<<"\n\n■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■"<<endl;

          // INVOKE Setup-4
          auto time_blockFlow = benchmark(blockFlowAlloc);

          Duration expectStep{FSecs{blockFlow.framesPerEpoch(), FPS} * 9/10};
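          // the Epoch step expected once the regulation has settled: the time needed to deliver
          // framesPerEpoch frames at the simulated frame rate, scaled by 9/10 for the target fill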

          cout<<"\n___Microbenchmark____"
              <<"\nnoAlloc     : "<<time_noAlloc
              <<"\nheapAlloc   : "<<time_heapAlloc
              <<"\nsharedAlloc : "<<time_sharedAlloc
              <<"\nblockFlow   : "<<time_blockFlow
              <<"\n_____________________\n"
              <<"\ninstances.... "<<INSTANCES
              <<"\nfps.......... "<<FPS
              <<"\nActivities/s. "<<TICK_P_S
              <<"\nEpoch(expect) "<<expectStep
              <<"\nEpoch (real)  "<<blockFlow.getEpochStep()
              <<"\ncnt Epochs... "<<watch(blockFlow).cntEpochs()
              <<"\nalloc pool... "<<watch(blockFlow).poolSize()
              <<endl;

          // all Activities have been read in all test cases,
          // yielding identical checksums
          CHECK (sum1 == sum2);
          CHECK (sum1 == sum3);
          CHECK (sum1 == sum4);

          // the Epoch spacing regulation must converge to within ±10ms
          CHECK (expectStep - blockFlow.getEpochStep() < Time(10,0));

          // after the initial overload is levelled out,
          // only a small number of Epochs should remain active
          CHECK (watch(blockFlow).cntEpochs() < 8);

          // Debug and Release builds differ vastly, so the runtime can only be checked against a very rough margin;
          // with -O3, this amortised allocation time should be way below time_sharedAlloc
          CHECK (time_blockFlow < 800);
        }
    };


  LAUNCHER (BlockFlow_test, "unit engine");


}}} // namespace vault::gear::test