AERA
pgm_overlay.cpp
1 //_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
2 //_/_/
3 //_/_/ AERA
4 //_/_/ Autocatalytic Endogenous Reflective Architecture
5 //_/_/
6 //_/_/ Copyright (c) 2018-2025 Jeff Thompson
7 //_/_/ Copyright (c) 2018-2025 Kristinn R. Thorisson
8 //_/_/ Copyright (c) 2018-2025 Icelandic Institute for Intelligent Machines
9 //_/_/ Copyright (c) 2018 Thor Tomasarson
10 //_/_/ http://www.iiim.is
11 //_/_/
12 //_/_/ Copyright (c) 2010-2012 Eric Nivel
13 //_/_/ Center for Analysis and Design of Intelligent Agents
14 //_/_/ Reykjavik University, Menntavegur 1, 102 Reykjavik, Iceland
15 //_/_/ http://cadia.ru.is
16 //_/_/
17 //_/_/ Part of this software was developed by Eric Nivel
18 //_/_/ in the HUMANOBS EU research project, which included
19 //_/_/ the following parties:
20 //_/_/
21 //_/_/ Autonomous Systems Laboratory
22 //_/_/ Technical University of Madrid, Spain
23 //_/_/ http://www.aslab.org/
24 //_/_/
25 //_/_/ Communicative Machines
26 //_/_/ Edinburgh, United Kingdom
27 //_/_/ http://www.cmlabs.com/
28 //_/_/
29 //_/_/ Istituto Dalle Molle di Studi sull'Intelligenza Artificiale
30 //_/_/ University of Lugano and SUPSI, Switzerland
31 //_/_/ http://www.idsia.ch/
32 //_/_/
33 //_/_/ Institute of Cognitive Sciences and Technologies
34 //_/_/ Consiglio Nazionale delle Ricerche, Italy
35 //_/_/ http://www.istc.cnr.it/
36 //_/_/
37 //_/_/ Dipartimento di Ingegneria Informatica
38 //_/_/ University of Palermo, Italy
39 //_/_/ http://diid.unipa.it/roboticslab/
40 //_/_/
41 //_/_/
42 //_/_/ --- HUMANOBS Open-Source BSD License, with CADIA Clause v 1.0 ---
43 //_/_/
44 //_/_/ Redistribution and use in source and binary forms, with or without
45 //_/_/ modification, is permitted provided that the following conditions
46 //_/_/ are met:
47 //_/_/ - Redistributions of source code must retain the above copyright
48 //_/_/ and collaboration notice, this list of conditions and the
49 //_/_/ following disclaimer.
50 //_/_/ - Redistributions in binary form must reproduce the above copyright
51 //_/_/ notice, this list of conditions and the following disclaimer
52 //_/_/ in the documentation and/or other materials provided with
53 //_/_/ the distribution.
54 //_/_/
55 //_/_/ - Neither the name of its copyright holders nor the names of its
56 //_/_/ contributors may be used to endorse or promote products
57 //_/_/ derived from this software without specific prior
58 //_/_/ written permission.
59 //_/_/
60 //_/_/ - CADIA Clause: The license granted in and to the software
61 //_/_/ under this agreement is a limited-use license.
62 //_/_/ The software may not be used in furtherance of:
63 //_/_/ (i) intentionally causing bodily injury or severe emotional
64 //_/_/ distress to any person;
65 //_/_/ (ii) invading the personal privacy or violating the human
66 //_/_/ rights of any person; or
67 //_/_/ (iii) committing or preparing for any act of war.
68 //_/_/
69 //_/_/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
70 //_/_/ CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
71 //_/_/ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
72 //_/_/ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
73 //_/_/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
74 //_/_/ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
75 //_/_/ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
76 //_/_/ BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
77 //_/_/ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
78 //_/_/ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
79 //_/_/ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
80 //_/_/ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
81 //_/_/ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
82 //_/_/ OF SUCH DAMAGE.
83 //_/_/
84 //_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
85 
86 #include "pgm_overlay.h"
87 #include "pgm_controller.h"
88 #include "mem.h"
89 #include "group.h"
90 #include "opcodes.h"
91 #include "context.h"
92 #include "callbacks.h"
93 
94 using namespace std;
95 using namespace r_code;
96 
97 // pgm layout:
98 //
99 // index content
100 //
101 // PGM_TPL_ARGS >iptr to the tpl args set
102 // PGM_INPUTS >iptr to the pattern set
103 // PGM_GUARDS >iptr to the guard set
104 // PGM_PRODS >iptr to the production set
105 // pgm_code[PGM_TPL_ARGS] >tpl arg set #n0
106 // pgm_code[PGM_TPL_ARGS]+1 >iptr to first tpl pattern
107 // ... >...
108 // pgm_code[PGM_TPL_ARGS]+n0 >iptr to last tpl pattern
109 // pgm_code[pgm_code[PGM_TPL_ARGS]+1] >opcode of the first tpl pattern
110 // ... >...
111 // pgm_code[pgm_code[PGM_TPL_ARGS]+n0] >opcode of the last tpl pattern
112 // pgm_code[PGM_INPUTS] >input pattern set #n1
113 // pgm_code[PGM_INPUTS]+1 >iptr to first input pattern
114 // ... >...
115 // pgm_code[PGM_INPUTS]+n1 >iptr to last input pattern
116 // pgm_code[pgm_code[PGM_INPUTS]+1] >opcode of the first input pattern
117 // ... >...
118 // pgm_code[pgm_code[PGM_INPUTS]+n1] >opcode of the last input pattern
119 // ... >...
120 // pgm_code[PGM_GUARDS] >guard set #n2
121 // pgm_code[PGM_GUARDS]+1 >iptr to first guard
122 // ... >...
123 // pgm_code[PGM_GUARDS]+n2 >iptr to last guard
124 // pgm_code[pgm_code[PGM_GUARDS]+1] >opcode of the first guard
125 // ... >...
126 // pgm_code[pgm_code[PGM_GUARDS]+n2] >opcode of the last guard
127 // ... >...
128 // pgm_code[PGM_PRODS] >production set #n3
129 // pgm_code[PGM_PRODS]+1 >iptr to first production
130 // ... >...
131 // pgm_code[PGM_PRODS]+n3 >iptr to last production
132 // pgm_code[pgm_code[PGM_PRODS]+1] >opcode of the first production
133 // ... >...
134 // pgm_code[pgm_code[PGM_PRODS]+n3] >opcode of the last production
135 // ... >...
136 
137 using namespace std::chrono;
138 
139 namespace r_exec {
140 
// Default constructor: builds an empty overlay with no controller; used for
// constructing PGMOverlay offsprings, which copy their state from an original afterwards.
InputLessPGMOverlay::InputLessPGMOverlay() : Overlay() { // used for constructing PGMOverlay offsprings.
}
143 
// Build an overlay over the controller's ipgm and immediately patch the pgm code's
// template patterns with pointers to the tpl args' actual values in the ipgm code.
InputLessPGMOverlay::InputLessPGMOverlay(Controller *c) : Overlay(c) {

  patch_tpl_args();
}
148 
// Nothing to release beyond what the base Overlay destructor handles.
InputLessPGMOverlay::~InputLessPGMOverlay() {
}
151 
152 void InputLessPGMOverlay::reset() {
153 
154  Overlay::reset();
155 
156  patch_tpl_args();
157 
158  patch_indices_.clear();
159  value_commit_index_ = 0;
160  values_.clear();
161  productions_.clear();
162 }
163 
164 bool InputLessPGMOverlay::evaluate(uint16 index) {
165 
166  IPGMContext c(get_object()->get_reference(0), get_view(), code_, index, this);
167  return c.evaluate();
168 }
169 
170 void InputLessPGMOverlay::patch_tpl_args() { // no rollback on that part of the code.
171  uint16 tpl_arg_set_index = code_[PGM_TPL_ARGS].asIndex(); // index to the set of all tpl patterns.
172  uint16 arg_count = code_[tpl_arg_set_index].getAtomCount();
173  uint16 ipgm_arg_set_index = get_object()->code(IPGM_ARGS).asIndex(); // index to the set of all ipgm tpl args.
174  for (uint16 i = 1; i <= arg_count; ++i) { // pgm_code[tpl_arg_set_index+i] is an iptr to a pattern.
175 
176  Atom &skel_iptr = code_[code_[tpl_arg_set_index + i].asIndex() + 1];
177  uint16 pgm_code_index = code_[tpl_arg_set_index + i].asIndex();
178 
179  patch_tpl_code(pgm_code_index, get_object()->code(ipgm_arg_set_index + i).asIndex());
180  skel_iptr = Atom::IPGMPointer(ipgm_arg_set_index + i); // patch the pgm code with ptrs to the tpl args' actual location in the ipgm code.
181  }
182 }
183 
184 void InputLessPGMOverlay::patch_tpl_code(uint16 pgm_code_index, uint16 ipgm_code_index) { // patch recursively : in pgm_code[index] with IPGM_PTRs until ::.
185 
186  uint16 atom_count = code_[pgm_code_index].getAtomCount();
187  for (uint16 j = 1; j <= atom_count; ++j) {
188 
189  switch (code_[pgm_code_index + j].getDescriptor()) {
190  case Atom::WILDCARD:
191  code_[pgm_code_index + j] = Atom::IPGMPointer(ipgm_code_index + j);
192  break;
193  case Atom::T_WILDCARD: // leave as is and stop patching.
194  return;
195  case Atom::I_PTR:
196  patch_tpl_code(code_[pgm_code_index + j].asIndex(), get_object()->code(ipgm_code_index + j).asIndex());
197  break;
198  default: // leave as is.
199  break;
200  }
201  }
202 }
203 
// Intentional no-op: an input-less program has no inputs to patch.
// PGMOverlay overrides this with the real patching logic.
void InputLessPGMOverlay::patch_input_code(uint16 pgm_code_index, uint16 input_index, uint16 input_code_index, int16 parent_index) {
}
206 
// Execute the pgm's production set. Two passes over the productions:
//   1) evaluate each command, build any newly produced objects (recorded in
//      productions_ and patched into the code as production pointers), and count
//      the commands that will be notified in a mk.rdx;
//   2) execute the commands one by one (inject/eject/mod/set/prb/stop or a cmd
//      to an external device), filling the mk.rdx production set along the way.
// Returns false (after rollback() and clearing productions_) on any evaluation
// failure or unknown function; returns true otherwise.
bool InputLessPGMOverlay::inject_productions() {

  auto now = Now();

  bool in_red = false; // if prods are computed by red, we have to evaluate the expression; otherwise, we have to evaluate the prods in the set one by one to be able to reference new objects in this->productions.
  IPGMContext prods(get_object()->get_reference(0), get_view(), code_, code_[PGM_PRODS].asIndex(), this);
  if (prods[0].getDescriptor() != Atom::SET) { // prods[0] is not a set: it is assumed to be an expression lead by red.

    in_red = true;
    if (!prods.evaluate()) {

      rollback();
      productions_.clear();
      return false;
    }
    prods = prods.dereference();
  }
  uint16 production_count = prods.get_children_count();
  uint16 cmd_count = 0; // cmds to the executive (excl. mod/set) and external devices.
  // First pass: evaluate each production, build produced objects and count notifiable commands.
  for (uint16 i = 1; i <= production_count; ++i) {

    IPGMContext cmd = prods.get_child_deref(i);
    if (!in_red && !cmd.evaluate()) {

      rollback();
      productions_.clear();
      return false;
    }
    IPGMContext function = cmd.get_child_deref(1);

    // layout of a command:
    // 0 >icmd opcode
    // 1 >function
    // 2 >iptr to the set of arguments
    // 3 >set
    // 4 >first arg
    // or:
    // 0 >cmd opcode
    // 1 >function
    // 2 >iptr to the set of arguments
    // 3 >psln_thr
    // 4 >set
    // 5 >first arg

    // identify the production of new objects.
    IPGMContext args = cmd.get_child_deref(2);
    if (cmd[0].asOpcode() == Opcodes::ICmd) {

      if (function[0].asOpcode() == Opcodes::Inject ||
        function[0].asOpcode() == Opcodes::Eject) { // args:[object view]; create an object if not a reference.

        Code *object;
        IPGMContext arg1 = args.get_child(1);
        uint16 index = arg1.getIndex();
        arg1 = arg1.dereference();
        if (arg1.is_reference())
          productions_.push_back(arg1.get_object());
        else {

          // Build a fresh object from the evaluated code; check_existence may return
          // an already-known identical object instead.
          object = _Mem::Get()->build_object(arg1[0]);
          arg1.copy(object, 0);
          productions_.push_back(_Mem::Get()->check_existence(object));
        }
        // Patch the command's object slot to point at the production just recorded.
        patch_code(index, Atom::ProductionPointer(productions_.size() - 1));

        ++cmd_count;
      } else if (function[0].asOpcode() != Opcodes::Mod &&
        function[0].asOpcode() != Opcodes::Set &&
        function[0].asOpcode() != Opcodes::Prb)
        ++cmd_count;
    } else {
      ++cmd_count;
      if (cmd[0].asOpcode() == Opcodes::Cmd)
        // We will also add the fact of the ejection to the set of productions.
        ++cmd_count;
    }
  }

  Code *mk_rdx = NULL;
  uint16 ntf_grp_count = get_view()->get_host()->get_ntf_grp_count();

  uint16 write_index;
  uint16 mk_rdx_prod_index;
  uint16 extent_index;
  if (ntf_grp_count && cmd_count && (get_object()->code(IPGM_NFR).asBoolean())) { // the productions are command objects (cmd); only injections/ejections and cmds to external devices are notified.

    mk_rdx = get_mk_rdx(write_index);
    mk_rdx_prod_index = write_index;
    mk_rdx->code(write_index++) = Atom::Set(cmd_count);
    extent_index = write_index + cmd_count;
  }

  // all productions have evaluated correctly; now we can execute the commands one by one.
  for (uint16 i = 1; i <= production_count; ++i) {

    IPGMContext cmd = prods.get_child_deref(i);
    IPGMContext function = cmd.get_child_deref(1);

    // call device functions.
    IPGMContext args = cmd.get_child_deref(2);
    if (cmd[0].asOpcode() == Opcodes::ICmd) { // command to the executive.

      if (function[0].asOpcode() == Opcodes::Inject) { // args:[object view]; retrieve the object and create a view.

        IPGMContext arg1 = args.get_child(1);
        arg1.dereference_once();
        Code *object = args.get_child_deref(1).get_object();
        IPGMContext _view = args.get_child_deref(2);
        if (_view[0].getAtomCount() != 0) { // regular view (i.e. not |[]).

          View *view = new View();
          _view.copy(view, 0);
          view->set_object(object);

          // The view's origin (VIEW_ORG) is this overlay's host group.
          view->references_[1] = get_view()->get_host();
          view->code(VIEW_ORG) = Atom::RPointer(1);

          _Mem::Get()->inject(view);

          if (mk_rdx) {

            mk_rdx->code(write_index++) = Atom::IPointer(extent_index);
            prods.get_child_deref(i).copy(mk_rdx, extent_index, extent_index);
          }
        } else // this allows building objects with no view (case in point: fact on object: only the fact needs to be injected).
          --cmd_count;
      } else if (function[0].asOpcode() == Opcodes::Eject) { // args:[object view destination_node]; view.grp=destination grp (stdin ot stdout); retrieve the object and create a view.

        Code *object = args.get_child_deref(1).get_object();

        IPGMContext _view = args.get_child_deref(2);
        View *view = new View();
        _view.copy(view, 0);
        view->set_object(object);

        IPGMContext node = args.get_child_deref(3);

        _Mem::Get()->eject(view, node[0].getNodeID());

        if (mk_rdx) {

          mk_rdx->code(write_index++) = Atom::IPointer(extent_index);
          prods.get_child_deref(i).copy(mk_rdx, extent_index, extent_index);
        }
      } else if (function[0].asOpcode() == Opcodes::Mod) { // args:[iptr-to-cptr value_].

        void *object;
        IPGMContext::ObjectType object_type;
        int16 member_index;
        uint32 view_oid;
        args.get_child(1).getMember(object, view_oid, object_type, member_index); // args.get_child(1) is an iptr.

        if (object) {

          float32 value = args.get_child_deref(2)[0].asFloat();
          switch (object_type) {
          case IPGMContext::TYPE_VIEW: { // add the target and value to the group's pending operations.

            Group *g = (Group *)object;
            g->enter();
            g->pending_operations_.push_back(new Group::Mod(view_oid, member_index, value));
            g->leave();
            break;
          }case IPGMContext::TYPE_OBJECT:
            ((Code *)object)->mod(member_index, value); // protected internally.
            break;
          case IPGMContext::TYPE_GROUP:
            ((Group *)object)->enter();
            ((Group *)object)->mod(member_index, value);
            ((Group *)object)->leave();
            break;
          default:
            // Unknown target type: abort the whole reduction.
            rollback();
            productions_.clear();
            return false;
          }
        }
      } else if (function[0].asOpcode() == Opcodes::Set) { // args:[iptr-to-cptr value_].

        void *object;
        IPGMContext::ObjectType object_type;
        int16 member_index;
        uint32 view_oid;
        args.get_child(1).getMember(object, view_oid, object_type, member_index); // args.get_child(1) is an iptr.

        if (object) {

          float32 value = args.get_child_deref(2)[0].asFloat();
          switch (object_type) {
          case IPGMContext::TYPE_VIEW: { // add the target and value to the group's pending operations.

            Group *g = (Group *)object;
            g->enter();
            g->pending_operations_.push_back(new Group::Set(view_oid, member_index, value));
            g->leave();
            break;
          }case IPGMContext::TYPE_OBJECT:
            ((Code *)object)->set(member_index, value); // protected internally.
            break;
          case IPGMContext::TYPE_GROUP:
            ((Group *)object)->enter();
            ((Group *)object)->set(member_index, value);
            ((Group *)object)->leave();
            break;
          }
        }
      } else if (function[0].asOpcode() == Opcodes::NewClass) { // TODO

      } else if (function[0].asOpcode() == Opcodes::DelClass) { // TODO

      } else if (function[0].asOpcode() == Opcodes::LDC) { // TODO

      } else if (function[0].asOpcode() == Opcodes::Swap) { // TODO

      } else if (function[0].asOpcode() == Opcodes::Prb) { // args:[probe_level,callback_name,msg,set of objects].

        float32 probe_lvl = args.get_child_deref(1)[0].asFloat();
        if (probe_lvl < _Mem::Get()->get_probe_level()) {

          std::string callback_name = Utils::GetString(&args.get_child_deref(2)[0]);

          Callbacks::Callback callback = Callbacks::Get(callback_name);
          if (callback) {

            std::string msg = Utils::GetString(&args.get_child_deref(3)[0]);
            IPGMContext _objects = args.get_child_deref(4);

            uint8 object_count = _objects[0].getAtomCount();
            Code **objects = NULL;
            if (object_count) {

              objects = new Code *[object_count];
              for (uint8 i = 1; i <= object_count; ++i)
                objects[i - 1] = _objects.get_child_deref(i).get_object();
            }

            callback(duration_cast<microseconds>(now - Utils::GetTimeReference()), false, msg.c_str(), object_count, objects);
            if (object_count)
              delete[] objects;
          }
        }
      } else if (function[0].asOpcode() == Opcodes::Stop) { // no args.

        _Mem::Get()->stop();
      } else { // unknown function.

        rollback();
        productions_.clear();
        return false;
      }
    } else if (cmd[0].asOpcode() == Opcodes::Cmd) { // command to an external device, build a cmd object and send it.

      P<Code> command = _Mem::Get()->build_object(cmd[0]);
      cmd.copy((Code*)command, 0);

      Code* executed_command = _Mem::Get()->eject(command);

      // Build a fact of the command and inject it in stdin. Give the fact an uncertainty range since we don't know when
      // it will be executed. Otherwise a fact with zero duration may not overlap a fact, making predictions fail.
      // We offset the beginning of the uncertainty range at a minimum by 2*GetTimeTolerance() from the frame start (the same as SYNC_HOLD)
      // so that CTPX::reduce will not fail due to "cause in sync with the premise".
      auto relative_time = duration_cast<microseconds>(now - Utils::GetTimeReference());
      auto frame_start = now - (relative_time % _Mem::Get()->get_sampling_period());
      auto after = max(now, frame_start + 2 * Utils::GetTimeTolerance());
      auto before = frame_start + _Mem::Get()->get_sampling_period();
      P<Code> fact;
      if (executed_command) {
        // Set fact to the efferent copy of the command and inject it.
        fact = new Fact(executed_command, after, before, 1, 1);
        View *view = new View(View::SYNC_ONCE, now, 1, 1, _Mem::Get()->get_stdin(), get_view()->get_host(), fact); // SYNC_ONCE, sln=1, res=1,
        _Mem::Get()->inject(view);
        string mk_rdx_info = "";
#ifdef WITH_DETAIL_OID
        if (mk_rdx)
          // We don't know the mk.rdx OID yet, so use the detail OID.
          mk_rdx_info = " mk.rdx(" + to_string(mk_rdx->get_detail_oid()) + "):";
#endif
        OUTPUT_LINE(IO_DEVICE_INJ_EJT, Utils::RelativeTime(Now()) << mk_rdx_info << " I/O device eject " << fact->get_oid());
      }
      else
        // The command wasn't executed. Set fact to an anti-fact of the original command and record in the mk_rdx.
        fact = new AntiFact(command, after, before, 1, 1);

      if (mk_rdx) {

        // Add the original command.
        mk_rdx->code(write_index++) = Atom::IPointer(extent_index);
        prods.get_child_deref(i).copy(mk_rdx, extent_index, extent_index);
        // Add the fact of the injected command that we just made.
        mk_rdx->code(write_index++) = Atom::RPointer(mk_rdx->references_size());
        mk_rdx->add_reference(fact);
      }
    }
  }

  if (mk_rdx) {

    // cmd_count may have shrunk (view-less injections): rewrite the production set's arity.
    mk_rdx->code(mk_rdx_prod_index) = Atom::Set(cmd_count);
    for (uint16 i = 1; i <= ntf_grp_count; ++i) {

      NotificationView *v = new NotificationView(get_view()->get_host(), get_view()->get_host()->get_ntf_grp(i), mk_rdx);
      _Mem::Get()->inject_notification(v, true);
    }

    OUTPUT_LINE((TraceLevel)0, Utils::RelativeTime(Now()) << " pgm " << controller_->get_object()->get_oid() <<
      " -> mk.rdx " << mk_rdx->get_oid());
  }

  return true;
}
517 
518 Code *InputLessPGMOverlay::get_mk_rdx(uint16 &extent_index) const {
519 
520  uint16 write_index = 0;
521  extent_index = MK_RDX_ARITY + 1;
522 
523  Code *mk_rdx = new r_exec::LObject(_Mem::Get());
524 
525  mk_rdx->code(write_index++) = Atom::Marker(Opcodes::MkRdx, MK_RDX_ARITY);
526  mk_rdx->code(write_index++) = Atom::RPointer(0); // code.
527  mk_rdx->add_reference(get_object());
528  mk_rdx->code(write_index++) = Atom::IPointer(extent_index); // inputs.
529  mk_rdx->code(extent_index++) = Atom::Set(0);
530  mk_rdx->code(write_index++) = Atom::IPointer(extent_index); // productions.
531  mk_rdx->code(write_index++) = Atom::Float(1); // psln_thr.
532 
533  return mk_rdx;
534 }
535 
537 
// Build an overlay over the controller's ipgm. The ipgm's IPGM_RES flag makes the
// overlay volatile: it is invalidated as soon as one of its matched inputs is
// (see is_invalidated()).
PGMOverlay::PGMOverlay(Controller *c) : InputLessPGMOverlay(c) {

  is_volatile_ = c->get_object()->code(IPGM_RES).asBoolean();
  init();
}
543 
// Build an offspring overlay from an original that just matched an input: the
// offspring inherits the original's state up to the last commit, but puts the last
// input pattern back so it can catch other candidate inputs for it.
PGMOverlay::PGMOverlay(PGMOverlay *original, uint16 last_input_index, uint16 value_commit_index) : InputLessPGMOverlay() {

  controller_ = original->controller_;

  input_pattern_indices_ = original->input_pattern_indices_;
  input_pattern_indices_.push_back(last_input_index); // put back the last original's input index.
  for (uint16 i = 0; i < original->input_views_.size() - 1; ++i) // omit the last original's input view.
    input_views_.push_back(original->input_views_[i]);

  code_size_ = original->code_size_;
  code_ = new r_code::Atom[code_size_];
  memcpy(code_, original->code_, code_size_ * sizeof(r_code::Atom)); // copy patched code.

  // Undo the original's input patches: restore the recorded indices from the pgm's pristine code.
  Atom *original_code = &get_object()->get_reference(0)->code(0);
  for (uint16 i = 0; i < original->patch_indices_.size(); ++i) // unpatch code.
    code_[original->patch_indices_[i]] = original_code[original->patch_indices_[i]];

  value_commit_index_ = value_commit_index;
  for (uint16 i = 0; i < value_commit_index; ++i) // copy values up to the last commit index.
    values_.push_back(original->values_[i]);

  is_volatile_ = original->is_volatile_;
  birth_time_ = original->birth_time_;
}
568 
// code_ is released by the base class; nothing extra to free here.
PGMOverlay::~PGMOverlay() {
}
571 
572 void PGMOverlay::init() {
573 
574  // init the list of pattern indices.
575  uint16 pattern_set_index = code_[PGM_INPUTS].asIndex();
576  uint16 pattern_count = code_[pattern_set_index].getAtomCount();
577  for (uint16 i = 1; i <= pattern_count; ++i)
578  input_pattern_indices_.push_back(code_[pattern_set_index + i].asIndex());
579 
580  birth_time_ = Timestamp(seconds(0));
581 }
582 
583 bool PGMOverlay::is_invalidated() {
584 
585  if (is_volatile_) {
586 
587  for (uint32 i = 0; i < input_views_.size(); ++i) {
588 
589  if (input_views_[i]->object_->is_invalidated())
590  return (invalidated_ = 1);
591  }
592  }
593 
594  return invalidated_ == 1;
595 }
596 
597 Code *PGMOverlay::dereference_in_ptr(Atom a) {
598 
599  switch (a.getDescriptor()) {
600  case Atom::IN_OBJ_PTR:
601  return getInputObject(a.asInputIndex());
602  case Atom::D_IN_OBJ_PTR: {
603  Atom ptr = code_[a.asRelativeIndex()]; // must be either an IN_OBJ_PTR or a D_IN_OBJ_PTR.
604  Code *parent = dereference_in_ptr(ptr);
605  return parent->get_reference(parent->code(ptr.asIndex()).asIndex());
606  }default: // shall never happen.
607  return NULL;
608  }
609 }
610 
// Patch recursively : in pgm_code[pgm_code_index] with (D_)IN_OBJ_PTRs until ::.
// pgm_code_index: start of the pattern structure to patch in this overlay's code.
// input_index: position of the input view in input_views_ (used by IN_OBJ_PTRs).
// input_code_index: index in the input object's code mirrored by this structure.
// parent_index: <0 for direct input access (IN_OBJ_PTR); otherwise the index of an
// already-patched atom to indirect through (D_IN_OBJ_PTR), used when the input
// holds an rptr where the pattern has an iptr.
// Every patched index is recorded in patch_indices_ so offspring can unpatch.
void PGMOverlay::patch_input_code(uint16 pgm_code_index, uint16 input_index, uint16 input_code_index, int16 parent_index) {

  uint16 atom_count = code_[pgm_code_index].getAtomCount();

  // Replace the head of a structure by a ptr to the input object.
  Atom head;
  if (parent_index < 0)
    head = code_[pgm_code_index] = Atom::InObjPointer(input_index, input_code_index);
  else
    head = code_[pgm_code_index] = Atom::DInObjPointer(parent_index, input_code_index);
  patch_indices_.push_back(pgm_code_index);

  // Proceed with the structure's members.
  for (uint16 j = 1; j <= atom_count; ++j) {

    uint16 patch_index = pgm_code_index + j;
    switch (code_[patch_index].getDescriptor()) {
    case Atom::T_WILDCARD:
      // Leave as is and stop patching.
      return;
    case Atom::WILDCARD:
      if (parent_index < 0)
        code_[patch_index] = Atom::InObjPointer(input_index, input_code_index + j);
      else
        code_[patch_index] = Atom::DInObjPointer(parent_index, input_code_index + j);
      patch_indices_.push_back(patch_index);
      break;
    case Atom::I_PTR: {
      // Sub-structure: go one level deeper in the pattern.
      uint16 indirection = code_[patch_index].asIndex(); // save the indirection before patching.

      if (parent_index < 0)
        code_[patch_index] = Atom::InObjPointer(input_index, input_code_index + j);
      else
        code_[patch_index] = Atom::DInObjPointer(parent_index, input_code_index + j);
      patch_indices_.push_back(patch_index);
      switch (dereference_in_ptr(head)->code(input_code_index + j).getDescriptor()) {
      // Caution: the pattern points to sub-structures using iptrs. However, the input object may have a rptr instead of an iptr: we have to disambiguate.
      case Atom::I_PTR:
        // Dereference and recurse.
        patch_input_code(indirection, input_index, dereference_in_ptr(head)->code(input_code_index + j).asIndex(), parent_index);
        break;
      case Atom::R_PTR:
        // Do not dereference and recurse: the patched atom at patch_index becomes the parent to indirect through.
        patch_input_code(indirection, input_index, 0, patch_index);
        break;
      default:
        // Shall never happen.
        break;
      }
      break;
    }default:
      // Leave as is.
      break;
    }
  }
}
669 
// Attempt to reduce the given input against the remaining patterns. On a successful
// match this overlay is consumed (invalidated or committed) and an offspring overlay
// is returned to take its place; on failure NULL is returned and the overlay stays
// available for other inputs.
Overlay *PGMOverlay::reduce(r_exec::View *input) {

  uint16 input_index;
  switch (match(input, input_index)) {
  case SUCCESS:
    if (input_pattern_indices_.size() == 0) { // all patterns matched.

      if (check_guards() && inject_productions()) {

        ((PGMController *)controller_)->notify_reduction();
        PGMOverlay *offspring = new PGMOverlay(this, input_index, value_commit_index_);
        invalidate();
        return offspring;
      } else {

        // Guards or production failed: the overlay is spent either way; replace it by an offspring.
        PGMOverlay *offspring = new PGMOverlay(this, input_index, value_commit_index_);
        invalidate();
        return offspring;
      }
    } else { // create an overlay in a state where the last input is not matched: this overlay will be able to catch other candidates for the input patterns that have already been matched.

      PGMOverlay *offspring = new PGMOverlay(this, input_index, value_commit_index_);
      commit();
      if (birth_time_.time_since_epoch().count() == 0)
        birth_time_ = Now(); // first successful match: start the overlay's lifetime clock.
      return offspring;
    }
  case FAILURE: // just rollback: let the overlay match other inputs.
    rollback();
    // fall through.
  case IMPOSSIBLE:
    return NULL;
  }

  return NULL;
}
705 
706 PGMOverlay::MatchResult PGMOverlay::match(r_exec::View *input, uint16 &input_index) {
707 
708  input_views_.push_back(input);
709  bool failed = false;
711  for (it = input_pattern_indices_.begin(); it != input_pattern_indices_.end(); ++it) {
712 
713  MatchResult r = _match(input, *it);
714  switch (r) {
715  case SUCCESS:
716  input_index = *it;
717  input_pattern_indices_.erase(it);
718  return r;
719  case FAILURE:
720  failed = true;
721  rollback(); // to try another pattern on a clean basis.
722  case IMPOSSIBLE:
723  break;
724  }
725  }
726  input_views_.pop_back();
727  return failed ? FAILURE : IMPOSSIBLE;
728 }
729 
730 PGMOverlay::MatchResult PGMOverlay::_match(r_exec::View *input, uint16 pattern_index) {
731 
732  if (code_[pattern_index].asOpcode() == Opcodes::AntiPtn) {
733 
734  IPGMContext input_object = IPGMContext::GetContextFromInput(input, this);
735  IPGMContext pattern_skeleton(get_object()->get_reference(0), get_view(), code_, code_[pattern_index + 1].asIndex(), this); // pgm_code[pattern_index] is the first atom of the pattern; pgm_code[pattern_index+1] is an iptr to the skeleton.
736  if (!pattern_skeleton.match(input_object))
737  return SUCCESS;
738  MatchResult r = __match(input, pattern_index);
739  switch (r) {
740  case IMPOSSIBLE:
741  case FAILURE:
742  return SUCCESS;
743  case SUCCESS:
744  return FAILURE;
745  }
746  } else if (code_[pattern_index].asOpcode() == Opcodes::Ptn) {
747 
748  IPGMContext input_object = IPGMContext::GetContextFromInput(input, this);
749  IPGMContext pattern_skeleton(get_object()->get_reference(0), get_view(), code_, code_[pattern_index + 1].asIndex(), this); // pgm_code[pattern_index] is the first atom of the pattern; pgm_code[pattern_index+1] is an iptr to the skeleton.
750  if (!pattern_skeleton.match(input_object))
751  return IMPOSSIBLE;
752  return __match(input, pattern_index);
753  }
754  return IMPOSSIBLE;
755 }
756 
757 PGMOverlay::MatchResult PGMOverlay::__match(r_exec::View *input, uint16 pattern_index) {
758  // The input has just been pushed on input_views_ (see match).
759  // pgm_code[pattern_index+1].asIndex() is the structure pointed by the pattern's skeleton.
760  patch_input_code(code_[pattern_index + 1].asIndex(), input_views_.size() - 1, 0);
761  // match: evaluate the set of guards.
762  uint16 guard_set_index = code_[pattern_index + 2].asIndex();
763  // Get the IPGMContext like in InputLessPGMOverlay::evaluate.
764  IPGMContext c(get_object()->get_reference(0), get_view(), code_, guard_set_index, this);
765  if (!c.evaluate())
766  return FAILURE;
767  if (c.dereference()[0].isBooleanFalse())
768  // The boolean guard is false.
769  return FAILURE;
770  return SUCCESS;
771 }
772 
773 bool PGMOverlay::check_guards() {
774 
775  uint16 guard_set_index = code_[PGM_GUARDS].asIndex();
776  uint16 guard_count = code_[guard_set_index].getAtomCount();
777  for (uint16 i = 1; i <= guard_count; ++i) {
778 
779  // Get the IPGMContext like in InputLessPGMOverlay::evaluate.
780  IPGMContext c(get_object()->get_reference(0), get_view(), code_, guard_set_index + i, this);
781  if (!c.evaluate())
782  return false;
783  if (c.dereference()[0].isBooleanFalse())
784  // The boolean guard is false.
785  return false;
786  }
787  return true;
788 }
789 
790 Code *PGMOverlay::get_mk_rdx(uint16 &extent_index) const {
791 
792  uint16 write_index = 0;
793  extent_index = MK_RDX_ARITY + 1;
794 
795  Code *mk_rdx = new r_exec::LObject(_Mem::Get());
796 
797  mk_rdx->code(write_index++) = Atom::Marker(Opcodes::MkRdx, MK_RDX_ARITY);
798  mk_rdx->code(write_index++) = Atom::RPointer(0); // code.
799  mk_rdx->add_reference(get_object());
800  mk_rdx->code(write_index++) = Atom::IPointer(extent_index); // inputs.
801  mk_rdx->code(extent_index++) = Atom::Set(input_views_.size());
802  for (uint16 i = 0; i < input_views_.size(); ++i) {
803 
804  mk_rdx->code(extent_index++) = Atom::RPointer(i + 1);
805  mk_rdx->add_reference(input_views_[i]->object_);
806  }
807  mk_rdx->code(write_index++) = Atom::IPointer(extent_index); // productions.
808  mk_rdx->code(write_index++) = Atom::Float(1); // psln_thr.
809 
810  return mk_rdx;
811 }
812 
814 
// Nothing to release beyond what the base class destructors handle.
AntiPGMOverlay::~AntiPGMOverlay() {
}
817 
// Attempt to reduce the given input for an anti-program. When the last pattern
// matches and the guards hold, the controller is restarted (see
// AntiPGMController::restart()) and no offspring is produced; a partial match
// yields an offspring overlay as in PGMOverlay::reduce.
Overlay *AntiPGMOverlay::reduce(r_exec::View *input) {

  uint16 input_index;
  switch (match(input, input_index)) {
  case SUCCESS:
    if (input_pattern_indices_.size() == 0) { // all patterns matched.

      if (check_guards()) {

        ((AntiPGMController *)controller_)->restart();
        return NULL;
      } else {
        rollback();
        return NULL;
      }
    } else {

      AntiPGMOverlay *offspring = new AntiPGMOverlay(this, input_index, value_commit_index_);
      commit();
      return offspring;
    }
  case FAILURE: // just rollback: let the overlay match other inputs.
    rollback();
    // fall through.
  case IMPOSSIBLE:
    return NULL;
  }

  return NULL;
}
847 }
r_code::list::const_iterator
Definition: list.h:266
r_exec::IPGMContext
Definition: context.h:102
r_exec::InputLessPGMOverlay::evaluate
bool evaluate(uint16 index)
Definition: pgm_overlay.cpp:164
r_code::Atom
Definition: atom.h:104
r_exec::PGMOverlay::__match
MatchResult __match(r_exec::View *input, uint16 pattern_index)
Definition: pgm_overlay.cpp:757
r_exec::LObject
Definition: r_exec/object.h:195
core::P
Definition: base.h:103
r_code::Code
Definition: r_code/object.h:224
r_exec::View
Definition: view.h:102