#include "mdl_controller.h"
#include "model_base.h"

using namespace std::chrono;
using namespace r_code;
_Mem::_Mem() : r_code::Mem(),
  reduction_core_count_(0),
  mdl_inertia_sr_thr_(0.9),
  mdl_inertia_cnt_thr_(6),
  min_sim_time_horizon_(0),
  max_sim_time_horizon_(0),
  sim_time_horizon_factor_(0.3),
  tpx_time_horizon_(5000000),
  perf_sampling_period_(250000),
  float_tolerance_(0.00001),
  time_tolerance_(10000),
  primary_thz_(seconds(3600000)),
  secondary_thz_(seconds(7200000)),
  goal_pred_success_res_(1000),
  keep_invalidated_objects_(false),
  reduction_job_count_(0),
  time_job_avg_latency_(0),
  _time_job_avg_latency_(0),
  default_runtime_output_stream_(&std::cout) {
  objects_.reserve(1024);
  for (uint32 i = 0; i < RUNTIME_OUTPUT_STREAM_COUNT; ++i)
    runtime_output_streams_[i] = NULL;
}
_Mem::~_Mem() {

  for (uint32 i = 0; i < RUNTIME_OUTPUT_STREAM_COUNT; ++i)
    if (runtime_output_streams_[i] != NULL)
      delete runtime_output_streams_[i];
}
void _Mem::init(microseconds base_period,
  uint32 reduction_core_count,
  uint32 time_core_count,
  float32 mdl_inertia_sr_thr,
  uint32 mdl_inertia_cnt_thr,
  float32 tpx_dsr_thr,
  microseconds min_sim_time_horizon,
  microseconds max_sim_time_horizon,
  float32 sim_time_horizon_factor,
  microseconds tpx_time_horizon,
  microseconds perf_sampling_period,
  float32 float_tolerance,
  microseconds time_tolerance,
  microseconds primary_thz,
  microseconds secondary_thz,
  uint32 ntf_mk_res,
  uint32 goal_pred_success_res,
  uint32 probe_level,
  bool keep_invalidated_objects) {
  base_period_ = base_period;
  reduction_core_count_ = reduction_core_count;
  time_core_count_ = time_core_count;
  mdl_inertia_sr_thr_ = mdl_inertia_sr_thr;
  mdl_inertia_cnt_thr_ = mdl_inertia_cnt_thr;
  tpx_dsr_thr_ = tpx_dsr_thr;
  min_sim_time_horizon_ = min_sim_time_horizon;
  max_sim_time_horizon_ = max_sim_time_horizon;
  sim_time_horizon_factor_ = sim_time_horizon_factor;
  tpx_time_horizon_ = tpx_time_horizon;
  perf_sampling_period_ = perf_sampling_period;
  float_tolerance_ = float_tolerance;
  time_tolerance_ = time_tolerance;
  primary_thz_ = primary_thz;
  secondary_thz_ = secondary_thz;
  ntf_mk_res_ = ntf_mk_res;
  goal_pred_success_res_ = goal_pred_success_res;
  probe_level_ = probe_level;
  keep_invalidated_objects_ = keep_invalidated_objects;

  reduction_job_count_ = time_job_count_ = 0;
  reduction_job_avg_latency_ = _reduction_job_avg_latency_ = microseconds(0);
  time_job_avg_latency_ = _time_job_avg_latency_ = microseconds(0);
  for (uint32 i = 0; i < RUNTIME_OUTPUT_STREAM_COUNT; ++i) {

    runtime_output_streams_[i] = NULL;
  }
}
std::ostream &_Mem::Output(TraceLevel l) {

  return (_Mem::Get()->runtime_output_streams_[l] == NULL ?
    *_Mem::Get()->default_runtime_output_stream_ : *(_Mem::Get()->runtime_output_streams_[l]));
}

std::ostream &_Mem_Output(TraceLevel l) { return _Mem::Output(l); }
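
// Usage sketch (illustrative, not part of the original source): callers fetch
// the stream for a trace level and write to it like any std::ostream; the
// level and message below are only examples.
//
//   _Mem::Output(IO_DEVICE_INJ_EJT) << "injected OID " << oid << std::endl;
//
// If no stream was registered for the level, the line goes to
// default_runtime_output_stream_ (std::cout unless overridden).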
  uint32 i;
  for (i = 0; i < reduction_core_count_; ++i)
    delete reduction_cores_[i];
  delete[] reduction_cores_;
  for (i = 0; i < time_core_count_; ++i)
    delete time_cores_[i];
  delete[] time_cores_;

  delete reduction_job_queue_;
  delete time_job_queue_;
Code *_Mem::get_root() const {

  return root_;
}

Code *_Mem::get_stdin() const {

  return stdin_;
}

Code *_Mem::get_stdout() const {

  return stdout_;
}

Code *_Mem::get_self() const {

  return self_;
}
_Mem::State _Mem::check_state() {
  // ...
}
void _Mem::start_core() {

  core_countCS_.enter();
  if (++core_count_ == 1)
    stop_sem_->acquire();
  core_countCS_.leave();
}

void _Mem::shutdown_core() {

  core_countCS_.enter();
  if (--core_count_ == 0)
    stop_sem_->release();
  core_countCS_.leave();
}
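
// start_core()/shutdown_core() implement a gate: the first core to start
// acquires stop_sem_ and the last one to shut down releases it, so stop() can
// block on stop_sem_->acquire() until every delegate has exited. A sketch of
// how a core's run loop would bracket its work (illustrative only):
//
//   mem->start_core();
//   for (;;) {
//     P<_ReductionJob> job = mem->pop_reduction_job(true);
//     if (job == NULL)
//       break; // The queue has shut down.
//     job->update(Now());
//   }
//   mem->shutdown_core();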
void _Mem::store(Code *object) {

  int32 location;
  objects_.push_back(object, location);
  object->set_strorage_index(location);
}
bool _Mem::load(const vector<r_code::Code *> *objects, uint32 stdin_oid, uint32 stdout_oid, uint32 self_oid) {

  uint32 i;
  reduction_cores_ = new ReductionCore *[reduction_core_count_];
  for (i = 0; i < reduction_core_count_; ++i)
    reduction_cores_[i] = new ReductionCore();
  time_cores_ = new TimeCore *[time_core_count_];
  for (i = 0; i < time_core_count_; ++i)
    time_cores_[i] = new TimeCore();

  Utils::SetReferenceValues(base_period_, float_tolerance_, time_tolerance_);

  // Load the root (always the first object).
  root_ = (Group *)(*objects)[0];
  store((Code *)root_);
  initial_groups_.push_back(root_);

  // Make sure that newly-created objects get OIDs above any loaded OID.
  uint32 highest_oid = 0;
  for (uint32 i = 0; i < objects->size(); ++i)
    highest_oid = max(highest_oid, (*objects)[i]->get_oid());
  set_last_oid(max(highest_oid, (uint32)objects->size() - 1));
  for (uint32 i = 1; i < objects->size(); ++i) {

    Code *object = (*objects)[i];
    store(object);

    if (object->get_oid() == stdin_oid)
      stdin_ = (Group *)(*objects)[i];
    else if (object->get_oid() == stdout_oid)
      stdout_ = (Group *)(*objects)[i];
    else if (object->get_oid() == self_oid)
      self_ = (*objects)[i];
    switch (object->code(0).getDescriptor()) {
    case Atom::MODEL:
      if (Utils::has_reference(&object->code(0), HLP_FWD_GUARDS)) {
        cerr << "ERROR: Illegal referenced object in forward guards of model OID " << object->get_oid() << endl;
        return false;
      }
      if (Utils::has_reference(&object->code(0), HLP_BWD_GUARDS)) {
        cerr << "ERROR: Illegal referenced object in backward guards of model OID " << object->get_oid() << endl;
        return false;
      }
      ModelBase::Get()->load(object);
      break;
    case Atom::COMPOSITE_STATE:
      if (Utils::has_reference(&object->code(0), HLP_FWD_GUARDS)) {
        cerr << "ERROR: Illegal referenced object in forward guards of cst OID " << object->get_oid() << endl;
        return false;
      }
      if (Utils::has_reference(&object->code(0), HLP_BWD_GUARDS)) {
        cerr << "ERROR: Illegal referenced object in backward guards of cst OID " << object->get_oid() << endl;
        return false;
      }
      break;
    case Atom::INSTANTIATED_PROGRAM:
      if (object->get_reference(0)->code(0).asOpcode() == Opcodes::Pgm) {

        if (object->get_reference(0)->code(object->get_reference(0)->code(PGM_INPUTS).asIndex()).getAtomCount() == 0)
          object->code(0) = Atom::InstantiatedInputLessProgram(object->code(0).asOpcode(), object->code(0).getAtomCount());
      } else
        object->code(0) = Atom::InstantiatedAntiProgram(object->code(0).asOpcode(), object->code(0).getAtomCount());
      break;
    }
    unordered_set<_View *, _View::Hash, _View::Equal>::const_iterator v;
    for (v = object->views_.begin(); v != object->views_.end(); ++v) {

      // Init the host's member set.
      View *view = (View *)*v;
      view->set_object(object);
      Group *host = view->get_host();

      if (!host->load(view, object))
        return false;
      if (host == stdin_ && view->get_sync() == View::SYNC_AXIOM &&
        (view->object_->code(0).asOpcode() == Opcodes::Fact ||
         view->object_->code(0).asOpcode() == Opcodes::AntiFact))
        axiom_values_.push_back(view->object_->get_reference(0));
    }

    if (object->code(0).getDescriptor() == Atom::GROUP)
      initial_groups_.push_back((Group *)object); // Convenience to create the initial update jobs.
  }

  return true;
}
static void update_timestamps(Timestamp time_reference, Atom* code, uint16 index) {

  Atom atom = code[index];

  switch (atom.getDescriptor()) {
  case Atom::TIMESTAMP: {
    auto ts = Utils::GetTimestamp(code + index).time_since_epoch();
    if (ts >= Utils::MaxTime - time_reference)
      // Treat as "forever": keep it pinned at the maximum value.
      Utils::SetTimestamp(code + index, Utils::MaxTime);
    else
      Utils::SetTimestamp(code + index, ts + time_reference);
    break;
  }
  case Atom::I_PTR:
    update_timestamps(time_reference, code, atom.asIndex());
    break;
  default: {
    // Recurse into the children of structural atoms (sets, objects, markers, etc.).
    uint16 count = atom.getAtomCount();
    for (uint16 i = 1; i <= count; ++i)
      update_timestamps(time_reference, code, index + i);
    break;
  }
  }
}
void _Mem::init_timestamps(Timestamp time_reference, const r_code::list<P<Code> >& objects) {

  for (auto o = objects.begin(); o != objects.end(); ++o)
    update_timestamps(time_reference, &(*o)->code(0), 0);
}
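
// Worked example (illustrative): seed code stores timestamps relative to the
// session start. With time_reference = the session's start time, a TIMESTAMP
// atom holding 100000us becomes time_reference + 100000us, while any value at
// or beyond Utils::MaxTime - time_reference is pinned to Utils::MaxTime and
// keeps meaning "forever".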
Timestamp _Mem::start() {

  if (state_ != STOPPED && state_ != NOT_STARTED)
    return Timestamp(seconds(0));

  uint32 i;
  vector<std::pair<View *, Group *> > initial_reduction_jobs;

  auto now = Now();
  Utils::SetTimeReference(now);
  ModelBase::Get()->set_thz(secondary_thz_);
  for (i = 0; i < initial_groups_.size(); ++i) {

    Group *g = initial_groups_[i];
    bool c_active = g->get_c_act() > g->get_c_act_thr();
    bool c_salient = g->get_c_sln() > g->get_c_sln_thr();

    FOR_ALL_VIEWS_BEGIN(g, v)
      Utils::SetTimestamp<View>(v->second, VIEW_IJT, now); // Initialize the views' injection times to now.
    FOR_ALL_VIEWS_END
    if (c_active) {

      unordered_map<uint32, P<View> >::const_iterator v;

      // Create signaling jobs for the active input-less programs.
      for (v = g->input_less_ipgm_views_.begin(); v != g->input_less_ipgm_views_.end(); ++v) {

        if (v->second->controller_ != NULL && v->second->controller_->is_activated()) {
          P<TimeJob> j = new InputLessPGMSignalingJob(v->second, now + Utils::GetDuration<Code>(v->second->object_, IPGM_TSC));
          time_job_queue_->push(j);
        }
      }

      // Create signaling jobs for the active anti-programs.
      for (v = g->anti_ipgm_views_.begin(); v != g->anti_ipgm_views_.end(); ++v) {

        if (v->second->controller_ != NULL && v->second->controller_->is_activated()) {
          P<TimeJob> j = new AntiPGMSignalingJob(v->second, now + Utils::GetDuration<Code>(v->second->object_, IPGM_TSC));
          time_job_queue_->push(j);
        }
      }
    }
    if (c_salient) {

      // Create the initial reduction jobs for the salient views.
      FOR_ALL_VIEWS_BEGIN(g, v)

        if (v->second->get_sln() > g->get_sln_thr()) { // Salient view.
          g->newly_salient_views_.insert(v->second);
          initial_reduction_jobs.push_back(std::pair<View *, Group *>(v->second, g));
        }
      FOR_ALL_VIEWS_END
    }
    if (g->get_upr() > 0) { // Schedule the group's first update job.

      P<TimeJob> j = new UpdateJob(g, g->get_next_upr_time(now));
      time_job_queue_->push(j);
    }
  }
  initial_groups_.clear();

  // Schedule the first performance sampling job.
  P<TimeJob> j = new PerfSamplingJob(now + perf_sampling_period_, perf_sampling_period_);
  time_job_queue_->push(j);
  state_ = RUNNING;

  for (i = 0; i < reduction_core_count_; ++i)
    reduction_cores_[i]->start(ReductionCore::Run);
  for (i = 0; i < time_core_count_; ++i)
    time_cores_[i]->start(TimeCore::Run);

  // Inject the initial reduction jobs, if any.
  for (uint32 i = 0; i < initial_reduction_jobs.size(); ++i)
    initial_reduction_jobs[i].second->inject_reduction_jobs(initial_reduction_jobs[i].first);

  return now;
}
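
// Typical host-side call sequence (a sketch; the settings values and the
// object list are hypothetical, see init() above for the parameter order):
//
//   r_exec::MemStatic mem;
//   mem.init(microseconds(50000), 4, 2,        // base period, core counts.
//     0.9, 6, 0.1,                             // model inertia thresholds, tpx_dsr_thr.
//     microseconds(0), microseconds(0), 0.3,   // simulation time horizons.
//     microseconds(5000000), microseconds(250000), 0.00001, microseconds(10000),
//     seconds(3600), seconds(7200), 10, 1000, 2, false);
//   if (mem.load(&ram_objects, stdin_oid, stdout_oid, self_oid)) {
//     auto starting_time = mem.start();
//     ...
//     mem.stop();
//   }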
void _Mem::run_in_diagnostic_time(milliseconds run_time) {

  // In diagnostic time, all jobs are run in this single thread.
  if (!(reduction_core_count_ == 0 && time_core_count_ == 0))
    return;

  DiagnosticTimeState diagnostic_time_state(this, run_time);
  while (diagnostic_time_state.step()) {}
}
DiagnosticTimeState::DiagnosticTimeState(_Mem* mem, milliseconds run_time)
  : mem_(mem), run_time_(run_time),
  n_reduction_jobs_this_sampling_period_(0),
  reduction_job_queue_index_(0) {

  mem_->on_diagnostic_time_tick();
  need_diagnostic_time_tick_ = false;
  end_time_ = Now() + run_time_;
}
bool DiagnosticTimeState::step() {

  // Cap the reduction jobs run per cycle so that time jobs can also run.
  const size_t max_reduction_jobs_per_cycle = 80000;

  if (mem_->get_state() == _Mem::STOPPED)
    return false;

  if (pass_number_ <= 100) {
    if (reduction_job_queue_index_ == 0) {
      // Transfer all pending reduction jobs to the local queue.
      while (true) {
        P<_ReductionJob> reduction_job = mem_->pop_reduction_job(false);
        if (reduction_job == NULL)
          break;
        reduction_job_queue_.push_back(reduction_job);
      }
    }
  }
  // Run up to max_reduction_jobs_per_cycle of the queued reduction jobs.
  size_t n_jobs_to_run = min(reduction_job_queue_.size(), max_reduction_jobs_per_cycle);
  if (n_jobs_to_run > 0) {
    if (reduction_job_queue_index_ < n_jobs_to_run) {

      reduction_job_queue_[reduction_job_queue_index_]->update(Now());
      reduction_job_queue_[reduction_job_queue_index_] = NULL;
      ++reduction_job_queue_index_;
      // Run one job per call to step().
      return true;
    }

    // All n_jobs_to_run jobs have been run.
    reduction_job_queue_index_ = 0;
    n_reduction_jobs_this_sampling_period_ += n_jobs_to_run;

    if (reduction_job_queue_.size() > max_reduction_jobs_per_cycle)
      // Remove the jobs that were run and leave the rest for the next cycle.
      reduction_job_queue_.erase(
        reduction_job_queue_.begin(), reduction_job_queue_.begin() + max_reduction_jobs_per_cycle);
    else
      reduction_job_queue_.clear();
  }
  if (n_reduction_jobs_this_sampling_period_ < max_reduction_jobs_per_cycle) {
    // ...
  }

  // Transfer all pending time jobs to the local queue, ordered by target time.
  while (true) {
    P<TimeJob> time_job = mem_->pop_time_job(false);
    if (time_job == NULL)
      break;
    ordered_time_job_queue_.insert(
      upper_bound(ordered_time_job_queue_.begin(),
        ordered_time_job_queue_.end(), time_job, time_job_compare_),
      time_job);
  }
  if (Now() >= end_time_)
    // Done.
    return false;

  if (!need_diagnostic_time_tick_ &&
    (ordered_time_job_queue_.size() == 0 ||
     ordered_time_job_queue_.front()->target_time_ >=
       tick_time_ + mem_->get_sampling_period())) {

    // No time job is due before the next tick: advance the tick time.
    tick_time_ += mem_->get_sampling_period();
    _Mem::diagnostic_time_now_ = tick_time_;
    n_reduction_jobs_this_sampling_period_ = 0;
    need_diagnostic_time_tick_ = true;
  }

  if (need_diagnostic_time_tick_ &&
    !(ordered_time_job_queue_.size() > 0 &&
      ordered_time_job_queue_.front()->target_time_ <= tick_time_)) {
    need_diagnostic_time_tick_ = false;
    mem_->on_diagnostic_time_tick();
  }
  if (ordered_time_job_queue_.size() == 0)
    return true;

  if (ordered_time_job_queue_.front()->target_time_ > Now())
    // Skip ahead: advance diagnostic time to the next job's target time.
    _Mem::diagnostic_time_now_ = ordered_time_job_queue_.front()->target_time_;
  // Run the next time job.
  P<TimeJob> time_job = ordered_time_job_queue_.front();
  ordered_time_job_queue_.erase(ordered_time_job_queue_.begin());

  if (!time_job->is_alive()) {
    time_job = NULL;
    return true;
  }

  Timestamp next_target(seconds(0));
  if (!time_job->update(next_target)) {
    // The job asked to shut down.
    time_job = NULL;
    return false;
  }

  if (next_target.time_since_epoch().count() != 0) {
    // The job must run again at next_target: reinsert it in order.
    time_job->target_time_ = next_target;
    ordered_time_job_queue_.insert(
      upper_bound(ordered_time_job_queue_.begin(),
        ordered_time_job_queue_.end(), time_job, time_job_compare_),
      time_job);
  }

  time_job = NULL;
  return true;
}
Timestamp _Mem::diagnostic_time_now_ = Timestamp(microseconds(1));

Timestamp _Mem::get_diagnostic_time_now() { return diagnostic_time_now_; }

void _Mem::on_diagnostic_time_tick() {}
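
// Sketch of running in diagnostic time (assumes init() was called with zero
// reduction and time cores, as checked above; values are illustrative): the
// clock does not follow wall time but jumps between time-job target times and
// sampling-period ticks, with diagnostic_time_now_ as "now".
//
//   mem.init(..., 0 /* reduction cores */, 0 /* time cores */, ...);
//   mem.load(&ram_objects, stdin_oid, stdout_oid, self_oid);
//   mem.start();
//   mem.run_in_diagnostic_time(milliseconds(2000)); // Steps until end_time_.
//   mem.stop();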
void _Mem::stop() {

  if (state_ != RUNNING) {

    return;
  }

  uint32 i;
  P<_ReductionJob> r;
  for (i = 0; i < reduction_core_count_; ++i)
    reduction_job_queue_->push(r = new ShutdownReductionCore());
  P<TimeJob> t;
  for (i = 0; i < time_core_count_; ++i)
    time_job_queue_->push(t = new ShutdownTimeCore());

  state_ = STOPPED;

  for (i = 0; i < time_core_count_; ++i)
    Thread::Wait(time_cores_[i]);

  for (i = 0; i < reduction_core_count_; ++i)
    Thread::Wait(reduction_cores_[i]);

  stop_sem_->acquire(); // Wait for all the delegates to exit.
}
P<_ReductionJob> _Mem::pop_reduction_job(bool waitForItem) {

  if (state_ == STOPPED)
    return NULL;
  return reduction_job_queue_->pop(waitForItem);
}
void _Mem::push_reduction_job(_ReductionJob *j) {

  if (state_ == STOPPED)
    return;

  P<_ReductionJob> _j = j;
  reduction_job_queue_->push(_j);
}
P<TimeJob> _Mem::pop_time_job(bool waitForItem) {

  if (state_ == STOPPED)
    return NULL;
  return time_job_queue_->pop(waitForItem);
}
void _Mem::push_time_job(TimeJob *j) {

  if (state_ == STOPPED)
    return;

  P<TimeJob> _j = j;
  time_job_queue_->push(_j);
}
void _Mem::eject(View *view, uint16 node_id) {
  // ...
}
void _Mem::inject_copy(View *view, Group *destination) {

  View *copied_view = new View(view, destination);
  inject_existing_object(copied_view, view->object_, destination);
}
void _Mem::inject_existing_object(View *view, Code *object, Group *host) {

  view->set_object(object);
  host->inject_existing_object(view);
}
void _Mem::inject_null_program(Controller *c, Group *group, microseconds time_to_live, bool take_past_inputs) {

  auto now = Now();

  Code *null_pgm = new LObject();
  null_pgm->code(0) = Atom::NullProgram(take_past_inputs);

  uint32 res = Utils::GetResilience(now, time_to_live, group->get_upr() * Utils::GetBasePeriod().count());

  View *view = new View(View::SYNC_ONCE, now, 0, res, group, NULL, null_pgm, 1);
  view->controller_ = c;

  inject_async(view);
}
void _Mem::inject_new_object(View *view) {

  Group *host = view->get_host();

  switch (view->object_->code(0).getDescriptor()) {
  case Atom::GROUP:
    host->inject_group(view);
    break;
  default:
    host->inject_new_object(view);
    break;
  }
}
  if (view->object_->get_oid() != UNDEFINED_OID)
    OUTPUT_LINE(IO_DEVICE_INJ_EJT, Utils::RelativeTime(Now()) << " I/O device inject " <<
      view->object_->get_oid() << ", ijt " << Utils::RelativeTime(view->get_ijt()));
Code* _Mem::inject_marker_value_from_io_device(Code* obj, Code* prop,
  Atom val, Timestamp after, Timestamp before,
  View::SyncMode sync_mode, Code* group) {

  // Build a mk.val marker: (mk.val obj prop val 1).
  Code* object = new LObject(this);
  object->code(0) = Atom::Marker(GetOpcode("mk.val"), 4);
  object->code(1) = Atom::RPointer(0);
  object->code(2) = Atom::RPointer(1);
  object->code(3) = val;
  object->code(4) = Atom::Float(1);

  object->set_reference(0, obj);
  object->set_reference(1, prop);

  return inject_fact_from_io_device(object, after, before, sync_mode, group);
}
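
// Resulting layout of the marker built above (derived from the assignments):
//
//   code(0): Marker mk.val, arity 4
//   code(1): R_PTR -> references(0) = obj
//   code(2): R_PTR -> references(1) = prop
//   code(3): val (an inline atom, e.g. a float or a boolean)
//   code(4): Float(1), the trailing psln_thr slot
//
// The overloads below keep the same skeleton and only change how the value in
// code(3) is stored (inline set, reference, string, etc.).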
Code* _Mem::inject_marker_value_from_io_device(Code* obj, Code* prop,
  std::vector<Atom> val, Timestamp after, Timestamp before,
  View::SyncMode sync_mode, Code* group) {

  // Build a mk.val marker whose value is an inline set of atoms.
  Code* object = new LObject(this);
  uint16 extent_index = 4;
  object->code(0) = Atom::Marker(GetOpcode("mk.val"), 4);
  object->code(1) = Atom::RPointer(0);
  object->code(2) = Atom::RPointer(1);
  object->code(3) = Atom::IPointer(++extent_index);
  object->code(4) = Atom::Float(1);

  object->set_reference(0, obj);
  object->set_reference(1, prop);

  object->code(extent_index) = Atom::Set(val.size());
  for (uint16 i = 0; i < val.size(); ++i) {
    object->code(++extent_index) = val[i];
  }

  return inject_fact_from_io_device(object, after, before, sync_mode, group);
}
Code* _Mem::inject_marker_value_from_io_device(Code* obj, Code* prop,
  Code* val, Timestamp after, Timestamp before,
  View::SyncMode sync_mode, Code* group) {

  // Build a mk.val marker whose value is a referenced object.
  Code* object = new LObject(this);
  object->code(0) = Atom::Marker(GetOpcode("mk.val"), 4);
  object->code(1) = Atom::RPointer(0);
  object->code(2) = Atom::RPointer(1);
  object->code(3) = Atom::RPointer(2);
  object->code(4) = Atom::Float(1);

  object->set_reference(0, obj);
  object->set_reference(1, prop);
  object->set_reference(2, val);

  return inject_fact_from_io_device(object, after, before, sync_mode, group);
}
Code* _Mem::inject_marker_value_from_io_device(Code* obj, Code* prop,
  const std::string& val, Timestamp after, Timestamp before, View::SyncMode sync_mode,
  Code* group) {

  // Build a mk.val marker whose value is an inline string.
  Code* object = new LObject(this);
  uint16 extent_index = 4;
  object->code(0) = Atom::Marker(GetOpcode("mk.val"), 4);
  object->code(1) = Atom::RPointer(0);
  object->set_reference(0, obj);
  object->code(2) = Atom::RPointer(1);
  object->set_reference(1, prop);
  object->code(3) = Atom::IPointer(++extent_index);
  object->code(4) = Atom::Float(1);

  Utils::SetString<Code>(object, 3, val);

  return inject_fact_from_io_device(object, after, before, sync_mode, group);
}
Code* _Mem::inject_marker_value_from_io_device(Code* obj, Code* prop,
  const vector<Code*>& val, Timestamp after, Timestamp before,
  View::SyncMode sync_mode, Code* group) {

  // Build a mk.val marker whose value is a set of referenced objects.
  Code* object = new LObject(this);
  uint16 extent_index = 4;
  object->code(0) = Atom::Marker(GetOpcode("mk.val"), 4);
  object->code(1) = Atom::RPointer(0);
  object->code(2) = Atom::RPointer(1);
  object->code(3) = Atom::IPointer(++extent_index);
  object->code(4) = Atom::Float(1);

  object->set_reference(0, obj);
  object->set_reference(1, prop);

  object->code(extent_index) = Atom::Set(val.size());
  for (uint16 i = 0; i < val.size(); ++i) {
    object->code(++extent_index) = Atom::RPointer(object->references_size());
    object->set_reference(object->references_size(), val[i]);
  }

  return inject_fact_from_io_device(object, after, before, sync_mode, group);
}
Code* _Mem::inject_marker_value_from_io_device(Code* obj, Code* prop, uint16 opcode,
  const vector<Atom>& vals, Timestamp after, Timestamp before,
  View::SyncMode sync_mode, Code* group) {

  // Build a mk.val marker whose value is an inline object made from opcode and vals.
  Code* object = new LObject(this);
  uint16 extent_index = 4;
  object->code(0) = Atom::Marker(GetOpcode("mk.val"), 4);
  object->code(1) = Atom::RPointer(0);
  object->set_reference(0, obj);
  object->code(2) = Atom::RPointer(1);
  object->set_reference(1, prop);
  object->code(3) = Atom::IPointer(++extent_index);
  object->code(4) = Atom::Float(1);

  object->code(extent_index++) = Atom::Object(opcode, vals.size());
  for (uint16 i = 0; i < vals.size(); ++i) {
    object->code(extent_index++) = vals[i];
  }

  return inject_fact_from_io_device(object, after, before, sync_mode, group);
}
Code* _Mem::inject_fact_from_io_device(Code* object, Timestamp after, Timestamp before, View::SyncMode sync_mode,
  Code* group) {

  // Build a fact for the object.
  Code* fact = new Fact(object, after, before, 1, 1);

  // Build a view for the fact.
  View *view = new View(sync_mode, after, 1, 1, group, NULL, fact);

  // Inject the fact.
  inject(view);
  return fact;
}
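
// Usage sketch from an I/O device loop (illustrative; sensor, position,
// sampling_period and stdin_group are hypothetical): inject a sensed value as
// a fact that is valid for one sampling period.
//
//   auto now = r_exec::Now();
//   mem->inject_marker_value_from_io_device(
//     sensor, position, Atom::Float(3.7),
//     now, now + sampling_period, r_exec::View::SYNC_PERIODIC, stdin_group);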
void _Mem::inject(View *view) {

  if (view->object_->is_invalidated())
    return;

  Group *host = view->get_host();

  if (host->is_invalidated())
    return;

  auto now = Now();
  auto ijt = view->get_ijt();

  if (view->object_->is_registered()) { // Existing object.

    if (ijt <= now)
      inject_existing_object(view, view->object_, host);
    else { // Delayed injection.
      P<TimeJob> j = new EInjectionJob(view, ijt);
      time_job_queue_->push(j);
    }
  } else { // New object.

    if (ijt <= now)
      inject_new_object(view);
    else { // Delayed injection.
      P<TimeJob> j = new InjectionJob(view, ijt, false);
      time_job_queue_->push(j);
    }
  }
}
void _Mem::inject_async(View *view) {

  if (view->object_->is_invalidated())
    return;

  Group *host = view->get_host();

  if (host->is_invalidated())
    return;

  auto now = Now();
  auto ijt = view->get_ijt();

  if (ijt <= now) {
    // Standard injection, performed asynchronously in a reduction job.
    P<_ReductionJob> j = new AsyncInjectionJob(view);
    reduction_job_queue_->push(j);
  } else { // Delayed injection.
    if (view->object_->is_registered()) { // Existing object.
      P<TimeJob> j = new EInjectionJob(view, ijt);
      time_job_queue_->push(j);
    } else {
      P<TimeJob> j = new InjectionJob(view, ijt, false);
      time_job_queue_->push(j);
    }
  }
}
void _Mem::inject_hlps(vector<View *> views, Group *destination) {

  vector<View *>::const_iterator view;
  for (view = views.begin(); view != views.end(); ++view)
    Utils::SetTimestamp<View>(*view, VIEW_IJT, Now()); // Inject all the views at the same time.

  destination->inject_hlps(views);
}
void _Mem::inject_notification(View *view, bool lock) {

  Group *host = view->get_host();

  Utils::SetTimestamp<View>(view, VIEW_IJT, Now());

  host->inject_notification(view, lock);
}
void _Mem::register_reduction_job_latency(microseconds latency) {

  // The avg_latency members accumulate a running sum until sampled by inject_perf_stats().
  reduction_jobCS_.enter();
  ++reduction_job_count_;
  reduction_job_avg_latency_ += latency;
  reduction_jobCS_.leave();
}

void _Mem::register_time_job_latency(microseconds latency) {

  time_jobCS_.enter();
  ++time_job_count_;
  time_job_avg_latency_ += latency;
  time_jobCS_.leave();
}
void _Mem::inject_perf_stats() {

  reduction_jobCS_.enter();
  time_jobCS_.enter();

  // Compute the average latencies over the last sampling period and their
  // deltas relative to the previous period.
  microseconds d_reduction_job_avg_latency;
  if (reduction_job_count_ > 0) {
    reduction_job_avg_latency_ /= reduction_job_count_;
    d_reduction_job_avg_latency = reduction_job_avg_latency_ - _reduction_job_avg_latency_;
  } else
    reduction_job_avg_latency_ = d_reduction_job_avg_latency = microseconds(0);

  microseconds d_time_job_avg_latency;
  if (time_job_count_ > 0) {
    time_job_avg_latency_ /= time_job_count_;
    d_time_job_avg_latency = time_job_avg_latency_ - _time_job_avg_latency_;
  } else
    time_job_avg_latency_ = d_time_job_avg_latency = microseconds(0);

  Code *perf = new Perf(reduction_job_avg_latency_, d_reduction_job_avg_latency, time_job_avg_latency_, d_time_job_avg_latency);

  // Reset the counters and remember the averages for computing the next deltas.
  reduction_job_count_ = time_job_count_ = 0;
  _reduction_job_avg_latency_ = reduction_job_avg_latency_;
  _time_job_avg_latency_ = time_job_avg_latency_;

  time_jobCS_.leave();
  reduction_jobCS_.leave();

  // Inject a fact on the perf object, valid for one sampling period, in stdin.
  auto now = Now();
  Code *f_perf = new Fact(perf, now, now + perf_sampling_period_, 1, 1);
  View *view = new View(View::SYNC_ONCE, now, 1, 1, stdin_, NULL, f_perf);
  inject(view);
}
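
// A monitoring program attached to stdin can therefore track runtime health
// by matching the injected facts; schematically (illustrative notation):
//
//   (fact (perf reduction_avg d_reduction_avg time_avg d_time_avg) now now+period)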
void _Mem::propagate_sln(Code *object, float32 change, float32 source_sln_thr) {

  // Propagate the saliency change to the object's views, morphed by each host
  // group's saliency threshold; morphing cuts off changes that become too
  // small, which prevents infinite feedback loops.
  object->acq_views();

  if (object->views_.size() == 0) {

    object->invalidate();
    object->rel_views();
    return;
  }

  unordered_set<_View *, _View::Hash, _View::Equal>::const_iterator it;
  for (it = object->views_.begin(); it != object->views_.end(); ++it) {

    float32 morphed_sln_change = View::MorphChange(change, source_sln_thr, ((View*)*it)->get_host()->get_sln_thr());
    if (morphed_sln_change != 0)
      ((View*)*it)->get_host()->pending_operations_.push_back(new Group::Mod(((View*)*it)->get_oid(), VIEW_SLN, morphed_sln_change));
  }
  object->rel_views();
}
void _Mem::unpack_hlp(Code *hlp) {

  // Produce a new object: the unpacked version of the hlp.
  Code *unpacked_hlp = new LObject();

  for (uint16 i = 0; i < hlp->code_size(); ++i)
    unpacked_hlp->code(i) = hlp->code(i);

  // Unpack the patterns: move the facts to the references.
  uint16 pattern_set_index = hlp->code(HLP_OBJS).asIndex();
  uint16 pattern_count = hlp->code(pattern_set_index).getAtomCount();
  for (uint16 i = 1; i <= pattern_count; ++i) {

    Code *fact = unpack_fact(hlp, hlp->code(pattern_set_index + i).asIndex());
    unpacked_hlp->add_reference(fact);
    unpacked_hlp->code(pattern_set_index + i) = Atom::RPointer(unpacked_hlp->references_size() - 1);
  }

  // Unpack the exit groups.
  uint16 group_set_index = hlp->code(HLP_OUT_GRPS).asIndex();
  uint16 group_count = hlp->code(group_set_index++).getAtomCount();
  for (uint16 i = 0; i < group_count; ++i) {

    unpacked_hlp->add_reference(hlp->get_reference(hlp->code(group_set_index + i).asIndex()));
    unpacked_hlp->code(group_set_index + i) = Atom::RPointer(unpacked_hlp->references_size() - 1);
  }

  // Shift the trailing code up to erase the zone that held the packed facts.
  uint16 invalid_point = pattern_set_index + pattern_count + 1; // First atom after the set of patterns.
  uint16 valid_point = hlp->code(HLP_FWD_GUARDS).asIndex(); // First atom after the zone to erase.
  uint16 invalid_zone_length = valid_point - invalid_point;
  for (uint16 i = valid_point; i < hlp->code_size(); ++i) {

    Atom h_atom = unpacked_hlp->code(i);
    switch (h_atom.getDescriptor()) {
    case Atom::I_PTR:
      unpacked_hlp->code(i - invalid_zone_length) = Atom::IPointer(h_atom.asIndex() - invalid_zone_length);
      break;
    case Atom::ASSIGN_PTR:
      unpacked_hlp->code(i - invalid_zone_length) = Atom::AssignmentPointer(h_atom.asAssignmentIndex(), h_atom.asIndex() - invalid_zone_length);
      break;
    default:
      unpacked_hlp->code(i - invalid_zone_length) = h_atom;
      break;
    }
  }

  // Adjust the pointers to the guards and the exit groups.
  unpacked_hlp->code(HLP_FWD_GUARDS) = Atom::IPointer(hlp->code(HLP_FWD_GUARDS).asIndex() - invalid_zone_length);
  unpacked_hlp->code(HLP_BWD_GUARDS) = Atom::IPointer(hlp->code(HLP_BWD_GUARDS).asIndex() - invalid_zone_length);
  unpacked_hlp->code(HLP_OUT_GRPS) = Atom::IPointer(hlp->code(HLP_OUT_GRPS).asIndex() - invalid_zone_length);

  uint16 unpacked_code_length = hlp->code_size() - invalid_zone_length;
  unpacked_hlp->resize_code(unpacked_code_length);
  hlp->add_reference(unpacked_hlp); // Store as a hidden reference on the packed hlp.
}
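
// Layout note (derived from the code above): in packed form the pattern facts
// live inline in the hlp's code, reached via I_PTRs, so the hlp is
// self-contained for storage; unpacking moves each fact into its own object
// reached via R_PTRs and shifts the trailing code (guards, exit groups) down
// by invalid_zone_length. pack_hlp() below is the inverse: it re-inlines the
// facts and shifts the pointers back up by inserted_zone_length.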
Code *_Mem::unpack_fact(Code *hlp, uint16 fact_index) {

  Code *fact = new LObject();
  Code *fact_object;
  uint16 fact_size = hlp->code(fact_index).getAtomCount() + 1;
  for (uint16 i = 0; i < fact_size; ++i) {

    Atom h_atom = hlp->code(fact_index + i);
    switch (h_atom.getDescriptor()) {
    case Atom::I_PTR: // A packed fact object: unpack it into its own object.
      fact->code(i) = Atom::RPointer(fact->references_size());
      fact_object = unpack_fact_object(hlp, h_atom.asIndex());
      fact->add_reference(fact_object);
      break;
    case Atom::R_PTR: // A reference to an existing object: share it.
      fact->code(i) = Atom::RPointer(fact->references_size());
      fact->add_reference(hlp->get_reference(h_atom.asIndex()));
      break;
    default:
      fact->code(i) = h_atom;
      break;
    }
  }

  return fact;
}
Code *_Mem::unpack_fact_object(Code *hlp, uint16 fact_object_index) {

  Code *fact_object = new LObject();
  _unpack_code(hlp, fact_object_index, fact_object, fact_object_index);
  return fact_object;
}
void _Mem::_unpack_code(Code *hlp, uint16 fact_object_index, Code *fact_object, uint16 read_index) {

  Atom h_atom = hlp->code(read_index);
  uint16 code_size = h_atom.getAtomCount() + 1;
  uint16 write_index = read_index - fact_object_index;
  for (uint16 i = 0; i < code_size; ++i) {

    switch (h_atom.getDescriptor()) {
    case Atom::R_PTR: // Share the reference.
      fact_object->code(write_index + i) = Atom::RPointer(fact_object->references_size());
      fact_object->add_reference(hlp->get_reference(h_atom.asIndex()));
      break;
    case Atom::I_PTR: // Rebase the internal pointer and recurse.
      fact_object->code(write_index + i) = Atom::IPointer(h_atom.asIndex() - fact_object_index);
      _unpack_code(hlp, fact_object_index, fact_object, h_atom.asIndex());
      break;
    default:
      fact_object->code(write_index + i) = h_atom;
      break;
    }

    h_atom = hlp->code(read_index + i + 1);
  }
}
void _Mem::pack_hlp(Code *hlp) const {

  Code *unpacked_hlp = clone(hlp);

  // Save the trailing code (guards and sets); it will be overwritten by the packed facts.
  vector<Atom> trailing_code;
  uint16 trailing_code_index = hlp->code(HLP_FWD_GUARDS).asIndex();
  for (uint16 i = trailing_code_index; i < hlp->code_size(); ++i)
    trailing_code.push_back(hlp->code(i));

  uint16 group_set_index = hlp->code(HLP_OUT_GRPS).asIndex();
  uint16 group_count = hlp->code(group_set_index).getAtomCount();

  vector<P<Code> > references;

  // Pack the patterns: write the facts inline, after the set of patterns.
  uint16 pattern_set_index = hlp->code(HLP_OBJS).asIndex();
  uint16 pattern_count = hlp->code(pattern_set_index).getAtomCount();
  uint16 insertion_point = pattern_set_index + pattern_count + 1; // First atom after the set of patterns.
  uint16 extent_index = insertion_point;
  for (uint16 i = 0; i < pattern_count; ++i) {

    Code *pattern_object = hlp->get_reference(i);
    hlp->code(pattern_set_index + i + 1) = Atom::IPointer(extent_index);
    pack_fact(pattern_object, hlp, extent_index, &references);
  }

  uint16 inserted_zone_length = extent_index - insertion_point;

  // Copy the trailing code back after the inserted zone; adjust the pointers into it.
  for (uint16 i = 0; i < trailing_code.size(); ++i) {

    Atom t_atom = trailing_code[i];
    switch (t_atom.getDescriptor()) {
    case Atom::I_PTR:
      hlp->code(i + extent_index) = Atom::IPointer(t_atom.asIndex() + inserted_zone_length);
      break;
    case Atom::ASSIGN_PTR:
      hlp->code(i + extent_index) = Atom::AssignmentPointer(t_atom.asAssignmentIndex(), t_atom.asIndex() + inserted_zone_length);
      break;
    default:
      hlp->code(i + extent_index) = t_atom;
      break;
    }
  }

  // Adjust the pointers to the guards and the exit groups.
  hlp->code(HLP_FWD_GUARDS) = Atom::IPointer(hlp->code(HLP_FWD_GUARDS).asIndex() + inserted_zone_length);
  hlp->code(HLP_BWD_GUARDS) = Atom::IPointer(hlp->code(HLP_BWD_GUARDS).asIndex() + inserted_zone_length);
  hlp->code(HLP_OUT_GRPS) = Atom::IPointer(hlp->code(HLP_OUT_GRPS).asIndex() + inserted_zone_length);

  group_set_index += inserted_zone_length;
  for (uint16 i = 1; i <= group_count; ++i) { // Reference the exit groups.

    references.push_back(hlp->get_reference(hlp->code(group_set_index + i).asIndex()));
    hlp->code(group_set_index + i) = Atom::RPointer(references.size() - 1);
  }

  hlp->set_references(references);

  hlp->add_reference(unpacked_hlp); // Store as a hidden reference on the packed hlp.
}
void _Mem::pack_fact(Code *fact, Code *hlp, uint16 &write_index, vector<P<Code> > *references) const {

  uint16 extent_index = write_index + fact->code_size();
  for (uint16 i = 0; i < fact->code_size(); ++i) {

    Atom p_atom = fact->code(i);
    switch (p_atom.getDescriptor()) {
    case Atom::R_PTR: // Pack the pointed object inline and point to it internally.
      hlp->code(write_index) = Atom::IPointer(extent_index);
      pack_fact_object(fact->get_reference(p_atom.asIndex()), hlp, extent_index, references);
      break;
    default:
      hlp->code(write_index) = p_atom;
      break;
    }
    ++write_index;
  }

  write_index = extent_index;
}
void _Mem::pack_fact_object(Code *fact_object, Code *hlp, uint16 &write_index, vector<P<Code> > *references) const {

  uint16 extent_index = write_index + fact_object->code_size();
  uint16 offset = write_index;
  for (uint16 i = 0; i < fact_object->code_size(); ++i) {

    Atom p_atom = fact_object->code(i);
    switch (p_atom.getDescriptor()) {
    case Atom::R_PTR: {
      // Reuse an existing reference to the same object if possible.
      Code *reference = fact_object->get_reference(p_atom.asIndex());
      bool found = false;
      for (uint16 i = 0; i < references->size(); ++i) {

        if ((*references)[i] == reference) {

          hlp->code(write_index) = Atom::RPointer(i);
          found = true;
          break;
        }
      }
      if (!found) {

        hlp->code(write_index) = Atom::RPointer(references->size());
        references->push_back(reference);
      }
      break;
    }
    case Atom::I_PTR: // Rebase the internal pointer.
      hlp->code(write_index) = Atom::IPointer(offset + p_atom.asIndex());
      break;
    default:
      hlp->code(write_index) = p_atom;
      break;
    }
    ++write_index;
  }
}
Code* _Mem::find_object(const vector<Code *> *objects, const char* name) {

  // Find the OID of the named object in the seed.
  uint32 oid = Seed.object_names_.findSymbol(name);
  if (oid == UNDEFINED_OID)
    return NULL;

  // Find the object with the matching OID.
  for (uint32 i = 0; i < objects->size(); ++i) {

    Code *object = (*objects)[i];
    if (object->get_oid() == oid)
      return object;
  }

  return NULL;
}
Code *_Mem::clone(Code *original) const {

  Code *_clone = build_object(original->code(0));
  uint16 opcode = original->code(0).asOpcode();
  if (opcode == Opcodes::Ont || opcode == Opcodes::Ent)
    // Ontologies and entities are shared, not cloned.
    return original;

  for (uint16 i = 0; i < original->code_size(); ++i)
    _clone->code(i) = original->code(i);
  for (uint16 i = 0; i < original->references_size(); ++i)
    _clone->add_reference(original->get_reference(i));

  return _clone;
}
  for (auto axiom = axiom_values_.begin(); axiom != axiom_values_.end(); ++axiom) {
    // ...
  }

  image->timestamp_ = Now();

  ModelBase::Get()->get_models(models);
MemStatic::MemStatic() : _Mem(), last_oid_(-1) {
}

MemStatic::~MemStatic() {
}

void MemStatic::bind(View *view) {

  Code *object = view->object_;
  object->views_.insert(view);
  object->set_oid(++last_oid_);
  if (object->code(0).getDescriptor() == Atom::NULL_PROGRAM) {
    // Null programs are not registered in the object store.
    return;
  }
  int32 location;
  objects_.push_back(object, location);
  object->set_strorage_index(location);
}
void MemStatic::set_last_oid(int32 oid) {

  last_oid_ = oid;
}
void MemStatic::delete_object(r_code::Code *object) {

  if (!keep_invalidated_objects_) {

    objects_.erase(object->get_storage_index());
  }
}
r_comp::Image *MemStatic::get_objects(bool include_invalidated) {

  // ...
  image->timestamp_ = Now();

  image->add_objects(objects_, include_invalidated);

  return image;
}
MemVolatile::MemVolatile() : _Mem(), last_oid_(-1) {
}

MemVolatile::~MemVolatile() {
}

uint32 MemVolatile::get_oid() {

  return Atomic::Increment32(&last_oid_);
}

void MemVolatile::set_last_oid(int32 oid) {

  last_oid_ = oid;
}

void MemVolatile::bind(View *view) {

  Code *object = view->object_;
  object->views_.insert(view);
  object->set_oid(get_oid());
}