/* Execute compiled code */

/* XXX TO DO:
   XXX speed up searching for keywords by using a dictionary
   XXX document it!
   */

/* enable more aggressive intra-module optimizations, where available */
#define PY_LOCAL_AGGRESSIVE

#include "Python.h"
#include "internal/pystate.h"

#include "code.h"
#include "dictobject.h"
#include "frameobject.h"
#include "opcode.h"
#include "pydtrace.h"
#include "setobject.h"
#include "structmember.h"

#include <ctype.h>

#ifdef Py_DEBUG
/* For debugging the interpreter: */
#define LLTRACE  1      /* Low-level trace feature */
#define CHECKEXC 1      /* Double-check exception checking */
#endif
/* Private API for the LOAD_METHOD opcode. */
extern int _PyObject_GetMethod(PyObject *, PyObject *, PyObject **);

typedef PyObject *(*callproc)(PyObject *, PyObject *, PyObject *);

/* Forward declarations */
Py_LOCAL_INLINE(PyObject *) call_function(PyObject ***, Py_ssize_t,
                                          PyObject *);
static PyObject * do_call_core(PyObject *, PyObject *, PyObject *);

#ifdef LLTRACE
static int lltrace;
static int prtrace(PyObject *, const char *);
#endif
static int call_trace(Py_tracefunc, PyObject *,
                      PyThreadState *, PyFrameObject *,
                      int, PyObject *);
static int call_trace_protected(Py_tracefunc, PyObject *,
                                PyThreadState *, PyFrameObject *,
                                int, PyObject *);
static void call_exc_trace(Py_tracefunc, PyObject *,
                           PyThreadState *, PyFrameObject *);
static int maybe_call_line_trace(Py_tracefunc, PyObject *,
                                 PyThreadState *, PyFrameObject *,
                                 int *, int *, int *);
static void maybe_dtrace_line(PyFrameObject *, int *, int *, int *);
static void dtrace_function_entry(PyFrameObject *);
static void dtrace_function_return(PyFrameObject *);

static PyObject * cmp_outcome(int, PyObject *, PyObject *);
static PyObject * import_name(PyFrameObject *, PyObject *, PyObject *,
                              PyObject *);
static PyObject * import_from(PyObject *, PyObject *);
static int import_all_from(PyObject *, PyObject *);
static void format_exc_check_arg(PyObject *, const char *, PyObject *);
static void format_exc_unbound(PyCodeObject *co, int oparg);
static PyObject * unicode_concatenate(PyObject *, PyObject *,
                                      PyFrameObject *, const _Py_CODEUNIT *);
static PyObject * special_lookup(PyObject *, _Py_Identifier *);
static int check_args_iterable(PyObject *func, PyObject *vararg);
static void format_kwargs_mapping_error(PyObject *func, PyObject *kwargs);
static void format_awaitable_error(PyTypeObject *, int);
#define NAME_ERROR_MSG \
    "name '%.200s' is not defined"
#define UNBOUNDLOCAL_ERROR_MSG \
    "local variable '%.200s' referenced before assignment"
#define UNBOUNDFREE_ERROR_MSG \
    "free variable '%.200s' referenced before assignment" \
    " in enclosing scope"

/* Dynamic execution profile */
#ifdef DYNAMIC_EXECUTION_PROFILE
#ifdef DXPAIRS
static long dxpairs[257][256];
#define dxp dxpairs[256]
#else
static long dxp[256];
#endif
#endif
#define GIL_REQUEST _Py_atomic_load_relaxed(&_PyRuntime.ceval.gil_drop_request)

/* This can set eval_breaker to 0 even though gil_drop_request became
   1.  We believe this is all right because the eval loop will release
   the GIL eventually anyway. */
#define COMPUTE_EVAL_BREAKER() \
    _Py_atomic_store_relaxed( \
        &_PyRuntime.ceval.eval_breaker, \
        GIL_REQUEST | \
        _Py_atomic_load_relaxed(&_PyRuntime.ceval.pending.calls_to_do) | \
        _PyRuntime.ceval.pending.async_exc)

#define SET_GIL_DROP_REQUEST() \
    do { \
        _Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 1); \
        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
    } while (0)

#define RESET_GIL_DROP_REQUEST() \
    do { \
        _Py_atomic_store_relaxed(&_PyRuntime.ceval.gil_drop_request, 0); \
        COMPUTE_EVAL_BREAKER(); \
    } while (0)

/* Pending calls are only modified under pending_lock */
#define SIGNAL_PENDING_CALLS() \
    do { \
        _Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 1); \
        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
    } while (0)

#define UNSIGNAL_PENDING_CALLS() \
    do { \
        _Py_atomic_store_relaxed(&_PyRuntime.ceval.pending.calls_to_do, 0); \
        COMPUTE_EVAL_BREAKER(); \
    } while (0)

#define SIGNAL_ASYNC_EXC() \
    do { \
        _PyRuntime.ceval.pending.async_exc = 1; \
        _Py_atomic_store_relaxed(&_PyRuntime.ceval.eval_breaker, 1); \
    } while (0)

#define UNSIGNAL_ASYNC_EXC() \
    do { \
        _PyRuntime.ceval.pending.async_exc = 0; \
        COMPUTE_EVAL_BREAKER(); \
    } while (0)
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
#include "pythread.h"
#include "ceval_gil.h"

int
PyEval_ThreadsInitialized(void)
{
    return gil_created();
}

void
PyEval_InitThreads(void)
{
    if (gil_created())
        return;
    create_gil();
    take_gil(PyThreadState_GET());
    _PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();
    if (!_PyRuntime.ceval.pending.lock)
        _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
}
void
_PyEval_FiniThreads(void)
{
    if (!gil_created())
        return;
    destroy_gil();
    assert(!gil_created());
}

void
PyEval_AcquireLock(void)
{
    PyThreadState *tstate = PyThreadState_GET();
    if (tstate == NULL)
        Py_FatalError("PyEval_AcquireLock: current thread state is NULL");
    take_gil(tstate);
}

void
PyEval_ReleaseLock(void)
{
    /* This function must succeed when the current thread state is NULL.
       We therefore avoid PyThreadState_GET() which dumps a fatal error
       in debug mode.
    */
    drop_gil((PyThreadState*)_Py_atomic_load_relaxed(
        &_PyThreadState_Current));
}

void
PyEval_AcquireThread(PyThreadState *tstate)
{
    if (tstate == NULL)
        Py_FatalError("PyEval_AcquireThread: NULL new thread state");
    /* Check someone has called PyEval_InitThreads() to create the lock */
    assert(gil_created());
    take_gil(tstate);
    if (PyThreadState_Swap(tstate) != NULL)
        Py_FatalError(
            "PyEval_AcquireThread: non-NULL old thread state");
}

void
PyEval_ReleaseThread(PyThreadState *tstate)
{
    if (tstate == NULL)
        Py_FatalError("PyEval_ReleaseThread: NULL thread state");
    if (PyThreadState_Swap(NULL) != tstate)
        Py_FatalError("PyEval_ReleaseThread: wrong thread state");
    drop_gil(tstate);
}

/* This function is called from PyOS_AfterFork_Child to destroy all threads
 * which are not running in the child process, and clear internal locks
 * which might be held by those threads.
 */

void
PyEval_ReInitThreads(void)
{
    PyThreadState *current_tstate = PyThreadState_GET();

    if (!gil_created())
        return;
    recreate_gil();
    _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
    take_gil(current_tstate);
    _PyRuntime.ceval.pending.main_thread = PyThread_get_thread_ident();

    /* Destroy all threads except the current one */
    _PyThreadState_DeleteExcept(current_tstate);
}

/* This function is used to signal that async exceptions are waiting to be
   raised, therefore it is also useful in non-threaded builds. */

void
_PyEval_SignalAsyncExc(void)
{
    SIGNAL_ASYNC_EXC();
}
/* Functions save_thread and restore_thread are always defined so
   dynamically loaded modules needn't be compiled separately for use
   with and without threads: */

PyThreadState *
PyEval_SaveThread(void)
{
    PyThreadState *tstate = PyThreadState_Swap(NULL);
    if (tstate == NULL)
        Py_FatalError("PyEval_SaveThread: NULL tstate");
    assert(gil_created());
    drop_gil(tstate);
    return tstate;
}

void
PyEval_RestoreThread(PyThreadState *tstate)
{
    if (tstate == NULL)
        Py_FatalError("PyEval_RestoreThread: NULL tstate");
    assert(gil_created());

    int err = errno;
    take_gil(tstate);
    /* _Py_Finalizing is protected by the GIL */
    if (_Py_IsFinalizing() && !_Py_CURRENTLY_FINALIZING(tstate)) {
        drop_gil(tstate);
        PyThread_exit_thread();
        Py_UNREACHABLE();
    }
    errno = err;

    PyThreadState_Swap(tstate);
}
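
/* Illustrative sketch (not part of this file): extension code typically
   releases the GIL around a blocking operation with this pair, usually
   through the Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS convenience
   macros, so other Python threads can run in the meantime.  The function
   do_blocking_io() is a hypothetical placeholder.

       PyThreadState *_save = PyEval_SaveThread();  // releases the GIL
       do_blocking_io();                            // no Python C API calls here
       PyEval_RestoreThread(_save);                 // reacquires the GIL
*/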
/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
   signal handlers or Mac I/O completion routines) can schedule calls
   to a function to be called synchronously.
   The synchronous function is called with one void* argument.
   It should return 0 for success or -1 for failure -- failure should
   be accompanied by an exception.

   If registry succeeds, the registry function returns 0; if it fails
   (e.g. due to too many pending calls) it returns -1 (without setting
   an exception condition).

   Note that because registry may occur from within signal handlers,
   or other asynchronous events, calling malloc() is unsafe!

   Any thread can schedule pending calls, but only the main thread
   will execute them.
   There is no facility to schedule calls to a particular thread, but
   that should be easy to change, should that ever be required.  In
   that case, the static variables here should go into the python
   threadstate.
*/

void
_PyEval_SignalReceived(void)
{
    /* bpo-30703: Function called when the C signal handler of Python gets a
       signal. We cannot queue a callback using Py_AddPendingCall() since
       that function is not async-signal-safe. */
    SIGNAL_PENDING_CALLS();
}

/* This implementation is thread-safe.  It allows
   scheduling to be made from any thread, and even from an executing
   callback.
 */

int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
    int i, j, result = 0;
    PyThread_type_lock lock = _PyRuntime.ceval.pending.lock;

    /* try a few times for the lock.  Since this mechanism is used
     * for signal handling (on the main thread), there is a (slim)
     * chance that a signal is delivered on the same thread while we
     * hold the lock during the Py_MakePendingCalls() function.
     * This avoids a deadlock in that case.
     * Note that signals can be delivered on any thread.  In particular,
     * on Windows, a SIGINT is delivered on a system-created worker
     * thread.
     * We also check for lock being NULL, in the unlikely case that
     * this function is called before any bytecode evaluation takes place.
     */
    if (lock != NULL) {
        for (i = 0; i < 100; i++) {
            if (PyThread_acquire_lock(lock, NOWAIT_LOCK))
                break;
        }
        if (i == 100)
            return -1;
    }

    i = _PyRuntime.ceval.pending.last;
    j = (i + 1) % NPENDINGCALLS;
    if (j == _PyRuntime.ceval.pending.first) {
        result = -1; /* Queue full */
    } else {
        _PyRuntime.ceval.pending.calls[i].func = func;
        _PyRuntime.ceval.pending.calls[i].arg = arg;
        _PyRuntime.ceval.pending.last = j;
    }
    /* signal main loop */
    SIGNAL_PENDING_CALLS();
    if (lock != NULL)
        PyThread_release_lock(lock);
    return result;
}
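
/* Illustrative sketch (not part of this file): a worker thread can hand
   work to the main interpreter thread with Py_AddPendingCall(); the
   callback later runs in the main thread with the GIL held.  The names
   report_progress and my_data are hypothetical.

       static int
       report_progress(void *arg)
       {
           struct my_data *d = (struct my_data *)arg;
           // Runs in the main thread with the GIL held, so the
           // Python C API may be used here.
           PySys_WriteStdout("%d%% done\n", d->percent);
           return 0;   // 0 on success, -1 with an exception set on failure
       }

       // From any thread (but not from a signal handler):
       if (Py_AddPendingCall(report_progress, data) < 0) {
           // queue was full; the call was not scheduled
       }
*/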
int
Py_MakePendingCalls(void)
{
    static int busy = 0;
    int i;
    int r = 0;

    assert(PyGILState_Check());

    if (!_PyRuntime.ceval.pending.lock) {
        /* initial allocation of the lock */
        _PyRuntime.ceval.pending.lock = PyThread_allocate_lock();
        if (_PyRuntime.ceval.pending.lock == NULL)
            return -1;
    }

    /* only service pending calls on main thread */
    if (_PyRuntime.ceval.pending.main_thread &&
        PyThread_get_thread_ident() != _PyRuntime.ceval.pending.main_thread)
    {
        return 0;
    }
    /* don't perform recursive pending calls */
    if (busy)
        return 0;
    busy = 1;
    /* unsignal before starting to call callbacks, so that any callback
       added in-between re-signals */
    UNSIGNAL_PENDING_CALLS();

    /* Python signal handler doesn't really queue a callback: it only signals
       that a signal was received, see _PyEval_SignalReceived(). */
    if (PyErr_CheckSignals() < 0) {
        goto error;
    }

    /* perform a bounded number of calls, in case of recursion */
    for (i = 0; i < NPENDINGCALLS; i++) {
        int j;
        int (*func)(void *);
        void *arg = NULL;

        /* pop one item off the queue while holding the lock */
        PyThread_acquire_lock(_PyRuntime.ceval.pending.lock, WAIT_LOCK);
        j = _PyRuntime.ceval.pending.first;
        if (j == _PyRuntime.ceval.pending.last) {
            func = NULL; /* Queue empty */
        } else {
            func = _PyRuntime.ceval.pending.calls[j].func;
            arg = _PyRuntime.ceval.pending.calls[j].arg;
            _PyRuntime.ceval.pending.first = (j + 1) % NPENDINGCALLS;
        }
        PyThread_release_lock(_PyRuntime.ceval.pending.lock);
        /* having released the lock, perform the callback */
        if (func == NULL)
            break;
        r = func(arg);
        if (r) {
            goto error;
        }
    }

    busy = 0;
    return r;

error:
    busy = 0;
    SIGNAL_PENDING_CALLS(); /* We're not done yet */
    return -1;
}
/* The interpreter's recursion limit */

#ifndef Py_DEFAULT_RECURSION_LIMIT
#define Py_DEFAULT_RECURSION_LIMIT 1000
#endif

int _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;

void
_PyEval_Initialize(struct _ceval_runtime_state *state)
{
    state->recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
    _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;
    _gil_initialize(&state->gil);
}

int
Py_GetRecursionLimit(void)
{
    return _PyRuntime.ceval.recursion_limit;
}

void
Py_SetRecursionLimit(int new_limit)
{
    _PyRuntime.ceval.recursion_limit = new_limit;
    _Py_CheckRecursionLimit = _PyRuntime.ceval.recursion_limit;
}
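
/* Illustrative sketch (not part of this file): an embedding application can
   adjust the limit around a deeply recursive workload; sys.setrecursionlimit()
   in Python code ends up in the same setter.

       int old_limit = Py_GetRecursionLimit();
       Py_SetRecursionLimit(old_limit * 2);   // allow deeper recursion
       // ... run the recursive workload ...
       Py_SetRecursionLimit(old_limit);       // restore the previous limit
*/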
/* the macro Py_EnterRecursiveCall() only calls _Py_CheckRecursiveCall()
   if the recursion_depth reaches _Py_CheckRecursionLimit.
   If USE_STACKCHECK, the macro decrements _Py_CheckRecursionLimit
   to guarantee that _Py_CheckRecursiveCall() is regularly called.
   Without USE_STACKCHECK, there is no need for this. */
int
_Py_CheckRecursiveCall(const char *where)
{
    PyThreadState *tstate = PyThreadState_GET();
    int recursion_limit = _PyRuntime.ceval.recursion_limit;

#ifdef USE_STACKCHECK
    tstate->stackcheck_counter = 0;
    if (PyOS_CheckStack()) {
        --tstate->recursion_depth;
        PyErr_SetString(PyExc_MemoryError, "Stack overflow");
        return -1;
    }
    /* Needed for ABI backwards-compatibility (see bpo-31857) */
    _Py_CheckRecursionLimit = recursion_limit;
#endif
    if (tstate->recursion_critical)
        /* Somebody asked that we don't check for recursion. */
        return 0;
    if (tstate->overflowed) {
        if (tstate->recursion_depth > recursion_limit + 50) {
            /* Overflowing while handling an overflow. Give up. */
            Py_FatalError("Cannot recover from stack overflow.");
        }
        return 0;
    }
    if (tstate->recursion_depth > recursion_limit) {
        --tstate->recursion_depth;
        tstate->overflowed = 1;
        PyErr_Format(PyExc_RecursionError,
                     "maximum recursion depth exceeded%s",
                     where);
        return -1;
    }
    return 0;
}
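
/* Illustrative sketch (not part of this file): C code that can recurse
   through Python objects brackets the recursion with Py_EnterRecursiveCall()
   and Py_LeaveRecursiveCall(); it is that macro pair which ends up calling
   _Py_CheckRecursiveCall() above.  walk_children() is a hypothetical helper.

       static int
       walk_children(PyObject *obj)
       {
           if (Py_EnterRecursiveCall(" while walking an object graph"))
               return -1;                  // RecursionError already set
           int result = 0;
           // ... may call walk_children() again on contained objects ...
           Py_LeaveRecursiveCall();
           return result;
       }
*/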
/* Status code for main loop (reason for stack unwind) */
enum why_code {
        WHY_NOT =       0x0001, /* No error */
        WHY_EXCEPTION = 0x0002, /* Exception occurred */
        WHY_RETURN =    0x0008, /* 'return' statement */
        WHY_BREAK =     0x0010, /* 'break' statement */
        WHY_CONTINUE =  0x0020, /* 'continue' statement */
        WHY_YIELD =     0x0040, /* 'yield' operator */
        WHY_SILENCED =  0x0080  /* Exception silenced by 'with' */
};

static int do_raise(PyObject *, PyObject *);
static int unpack_iterable(PyObject *, int, int, PyObject **);

#define _Py_TracingPossible _PyRuntime.ceval.tracing_possible


PyObject *
PyEval_EvalCode(PyObject *co, PyObject *globals, PyObject *locals)
{
    return PyEval_EvalCodeEx(co,
                      globals, locals,
                      (PyObject **)NULL, 0,
                      (PyObject **)NULL, 0,
                      (PyObject **)NULL, 0,
                      NULL, NULL);
}
/* Interpreter main loop */

PyObject *
PyEval_EvalFrame(PyFrameObject *f) {
    /* This is for backward compatibility with extension modules that
       used this API; core interpreter code should call
       PyEval_EvalFrameEx() */
    return PyEval_EvalFrameEx(f, 0);
}

PyObject *
PyEval_EvalFrameEx(PyFrameObject *f, int throwflag)
{
    PyThreadState *tstate = PyThreadState_GET();
    return tstate->interp->eval_frame(f, throwflag);
}

PyObject* _Py_HOT_FUNCTION
_PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
{
#ifdef DXPAIRS
    int lastopcode = 0;
#endif
    PyObject **stack_pointer;  /* Next free slot in value stack */
    const _Py_CODEUNIT *next_instr;
    int opcode;        /* Current opcode */
    int oparg;         /* Current opcode argument, if any */
    enum why_code why; /* Reason for block stack unwind */
    PyObject **fastlocals, **freevars;
    PyObject *retval = NULL;   /* Return value */
    PyThreadState *tstate = PyThreadState_GET();
    PyCodeObject *co;

    /* when tracing we set things up so that

           not (instr_lb <= current_bytecode_offset < instr_ub)

       is true when the line being executed has changed.  The
       initial values are such as to make this false the first
       time it is tested. */
    int instr_ub = -1, instr_lb = 0, instr_prev = -1;

    const _Py_CODEUNIT *first_instr;
    PyObject *names;
    PyObject *consts;

#ifdef LLTRACE
    _Py_IDENTIFIER(__ltrace__);
#endif
/* Computed GOTOs, or
       the-optimization-commonly-but-improperly-known-as-"threaded code"
   using gcc's labels-as-values extension
   (http://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html).

   The traditional bytecode evaluation loop uses a "switch" statement, which
   decent compilers will optimize as a single indirect branch instruction
   combined with a lookup table of jump addresses. However, since the
   indirect jump instruction is shared by all opcodes, the CPU will have a
   hard time making the right prediction for where to jump next (actually,
   it will be always wrong except in the uncommon case of a sequence of
   several identical opcodes).

   "Threaded code" in contrast, uses an explicit jump table and an explicit
   indirect jump instruction at the end of each opcode. Since the jump
   instruction is at a different address for each opcode, the CPU will make a
   separate prediction for each of these instructions, which is equivalent to
   predicting the second opcode of each opcode pair. These predictions have
   a much better chance to turn out valid, especially in small bytecode loops.

   A mispredicted branch on a modern CPU flushes the whole pipeline and
   can cost several CPU cycles (depending on the pipeline depth),
   and potentially many more instructions (depending on the pipeline width).
   A correctly predicted branch, however, is nearly free.

   At the time of this writing, the "threaded code" version is up to 15-20%
   faster than the normal "switch" version, depending on the compiler and the
   CPU architecture.

   We disable the optimization if DYNAMIC_EXECUTION_PROFILE is defined,
   because it would render the measurements invalid.

   NOTE: care must be taken that the compiler doesn't try to "optimize" the
   indirect jumps by sharing them between all opcodes. Such optimizations
   can be disabled on gcc by using the -fno-gcse flag (or possibly
   -fno-crossjumping).
*/
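
/* Illustrative sketch (not part of this file) of the two dispatch styles
   described above, reduced to two made-up opcodes; program, pc and the
   opcode names are hypothetical.

       // switch dispatch: every opcode returns to one shared indirect jump
       for (;;) {
           switch (program[pc++]) {
               case OP_LOAD: do_load(); break;
               case OP_ADD:  do_add();  break;
           }
       }

       // threaded dispatch (gcc labels-as-values): each opcode body ends
       // with its own indirect jump, so the CPU predicts them separately
       static void *targets[] = { &&op_load, &&op_add };
       goto *targets[program[pc++]];
   op_load: do_load(); goto *targets[program[pc++]];
   op_add:  do_add();  goto *targets[program[pc++]];
*/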
#ifdef DYNAMIC_EXECUTION_PROFILE
#undef USE_COMPUTED_GOTOS
#define USE_COMPUTED_GOTOS 0
#endif

#ifdef HAVE_COMPUTED_GOTOS
    #ifndef USE_COMPUTED_GOTOS
    #define USE_COMPUTED_GOTOS 1
    #endif
#else
    #if defined(USE_COMPUTED_GOTOS) && USE_COMPUTED_GOTOS
    #error "Computed gotos are not supported on this compiler."
    #endif
    #undef USE_COMPUTED_GOTOS
    #define USE_COMPUTED_GOTOS 0
#endif

#if USE_COMPUTED_GOTOS
/* Import the static jump table */
#include "opcode_targets.h"

#define TARGET(op) \
    TARGET_##op: \
    case op:

#define DISPATCH() \
    { \
        if (!_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) { \
            FAST_DISPATCH(); \
        } \
        continue; \
    }

#ifdef LLTRACE
#define FAST_DISPATCH() \
    { \
        if (!lltrace && !_Py_TracingPossible && !PyDTrace_LINE_ENABLED()) { \
            f->f_lasti = INSTR_OFFSET(); \
            NEXTOPARG(); \
            goto *opcode_targets[opcode]; \
        } \
        goto fast_next_opcode; \
    }
#else
#define FAST_DISPATCH() \
    { \
        if (!_Py_TracingPossible && !PyDTrace_LINE_ENABLED()) { \
            f->f_lasti = INSTR_OFFSET(); \
            NEXTOPARG(); \
            goto *opcode_targets[opcode]; \
        } \
        goto fast_next_opcode; \
    }
#endif

#else
#define TARGET(op) \
    case op:

#define DISPATCH() continue
#define FAST_DISPATCH() goto fast_next_opcode
#endif
/* Tuple access macros */

#ifndef Py_DEBUG
#define GETITEM(v, i) PyTuple_GET_ITEM((PyTupleObject *)(v), (i))
#else
#define GETITEM(v, i) PyTuple_GetItem((v), (i))
#endif

/* Code access macros */

/* The integer overflow is checked by an assertion below. */
#define INSTR_OFFSET()  \
    (sizeof(_Py_CODEUNIT) * (int)(next_instr - first_instr))
#define NEXTOPARG()  do { \
        _Py_CODEUNIT word = *next_instr; \
        opcode = _Py_OPCODE(word); \
        oparg = _Py_OPARG(word); \
        next_instr++; \
    } while (0)
#define JUMPTO(x)       (next_instr = first_instr + (x) / sizeof(_Py_CODEUNIT))
#define JUMPBY(x)       (next_instr += (x) / sizeof(_Py_CODEUNIT))
/* OpCode prediction macros
    Some opcodes tend to come in pairs thus making it possible to
    predict the second code when the first is run.  For example,
    COMPARE_OP is often followed by POP_JUMP_IF_FALSE or POP_JUMP_IF_TRUE.

    Verifying the prediction costs a single high-speed test of a register
    variable against a constant.  If the pairing was good, then the
    processor's own internal branch prediction has a high likelihood of
    success, resulting in a nearly zero-overhead transition to the
    next opcode.  A successful prediction saves a trip through the eval-loop
    including its unpredictable switch-case branch.  Combined with the
    processor's internal branch prediction, a successful PREDICT has the
    effect of making the two opcodes run as if they were a single new opcode
    with the bodies combined.

    If collecting opcode statistics, your choices are to either keep the
    predictions turned-on and interpret the results as if some opcodes
    had been combined or turn-off predictions so that the opcode frequency
    counter updates for both opcodes.

    Opcode prediction is disabled with threaded code, since the latter allows
    the CPU to record separate branch prediction information for each
    opcode.
*/

#if defined(DYNAMIC_EXECUTION_PROFILE) || USE_COMPUTED_GOTOS
#define PREDICT(op)             if (0) goto PRED_##op
#else
#define PREDICT(op) \
    do { \
        _Py_CODEUNIT word = *next_instr; \
        opcode = _Py_OPCODE(word); \
        if (opcode == op) { \
            oparg = _Py_OPARG(word); \
            next_instr++; \
            goto PRED_##op; \
        } \
    } while(0)
#endif
#define PREDICTED(op)           PRED_##op:
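
/* Illustrative sketch (not part of the definitions above): inside the main
   switch, an opcode that is commonly followed by another ends its body with
   PREDICT(), and the predicted opcode's case is labelled with PREDICTED(),
   roughly as follows (bodies elided).

       TARGET(COMPARE_OP) {
           // ... push the comparison result ...
           PREDICT(POP_JUMP_IF_FALSE);
           PREDICT(POP_JUMP_IF_TRUE);
           DISPATCH();
       }

       PREDICTED(POP_JUMP_IF_FALSE);
       TARGET(POP_JUMP_IF_FALSE) {
           // ...
       }
*/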
/* Stack manipulation macros */

/* The stack can grow at most MAXINT deep, as co_nlocals and
   co_stacksize are ints. */
#define STACK_LEVEL()     ((int)(stack_pointer - f->f_valuestack))
#define EMPTY()           (STACK_LEVEL() == 0)
#define TOP()             (stack_pointer[-1])
#define SECOND()          (stack_pointer[-2])
#define THIRD()           (stack_pointer[-3])
#define FOURTH()          (stack_pointer[-4])
#define PEEK(n)           (stack_pointer[-(n)])
#define SET_TOP(v)        (stack_pointer[-1] = (v))
#define SET_SECOND(v)     (stack_pointer[-2] = (v))
#define SET_THIRD(v)      (stack_pointer[-3] = (v))
#define SET_FOURTH(v)     (stack_pointer[-4] = (v))
#define SET_VALUE(n, v)   (stack_pointer[-(n)] = (v))
#define BASIC_STACKADJ(n) (stack_pointer += n)
#define BASIC_PUSH(v)     (*stack_pointer++ = (v))
#define BASIC_POP()       (*--stack_pointer)

#ifdef LLTRACE
#define PUSH(v)         { (void)(BASIC_PUSH(v), \
                          lltrace && prtrace(TOP(), "push")); \
                          assert(STACK_LEVEL() <= co->co_stacksize); }
#define POP()           ((void)(lltrace && prtrace(TOP(), "pop")), \
                         BASIC_POP())
#define STACKADJ(n)     { (void)(BASIC_STACKADJ(n), \
                          lltrace && prtrace(TOP(), "stackadj")); \
                          assert(STACK_LEVEL() <= co->co_stacksize); }
#define EXT_POP(STACK_POINTER) ((void)(lltrace && \
                                prtrace((STACK_POINTER)[-1], "ext_pop")), \
                                *--(STACK_POINTER))
#else
#define PUSH(v)                BASIC_PUSH(v)
#define POP()                  BASIC_POP()
#define STACKADJ(n)            BASIC_STACKADJ(n)
#define EXT_POP(STACK_POINTER) (*--(STACK_POINTER))
#endif

/* Local variable macros */

#define GETLOCAL(i)     (fastlocals[i])

/* The SETLOCAL() macro must not DECREF the local variable in-place and
   then store the new value; it must copy the old value to a temporary
   value, then store the new value, and then DECREF the temporary value.
   This is because it is possible that during the DECREF the frame is
   accessed by other code (e.g. a __del__ method or gc.collect()) and the
   variable would be pointing to already-freed memory. */
#define SETLOCAL(i, value)      do { PyObject *tmp = GETLOCAL(i); \
                                     GETLOCAL(i) = value; \
                                     Py_XDECREF(tmp); } while (0)
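
/* Illustrative sketch (not part of this file) of the hazard described above.
   The naive version drops the reference before updating the slot, so code
   triggered by the DECREF (a __del__ method, a GC run) can still reach the
   dangling pointer through the frame:

       // unsafe: the frame briefly holds a pointer to freed memory
       Py_XDECREF(GETLOCAL(i));
       GETLOCAL(i) = value;

   SETLOCAL() stores the new value first and only then releases the old one.
*/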
#define UNWIND_BLOCK(b) \
    while (STACK_LEVEL() > (b)->b_level) { \
        PyObject *v = POP(); \
        Py_XDECREF(v); \
    }

#define UNWIND_EXCEPT_HANDLER(b) \
    do { \
        PyObject *type, *value, *traceback; \
        _PyErr_StackItem *exc_info; \
        assert(STACK_LEVEL() >= (b)->b_level + 3); \
        while (STACK_LEVEL() > (b)->b_level + 3) { \
            value = POP(); \
            Py_XDECREF(value); \
        } \
        exc_info = tstate->exc_info; \
        type = exc_info->exc_type; \
        value = exc_info->exc_value; \
        traceback = exc_info->exc_traceback; \
        exc_info->exc_type = POP(); \
        exc_info->exc_value = POP(); \
        exc_info->exc_traceback = POP(); \
        Py_XDECREF(type); \
        Py_XDECREF(value); \
        Py_XDECREF(traceback); \
    } while(0)
/* Start of code */

    /* push frame */
    if (Py_EnterRecursiveCall(""))
        return NULL;

    tstate->frame = f;

    if (tstate->use_tracing) {
        if (tstate->c_tracefunc != NULL) {
            /* tstate->c_tracefunc, if defined, is a
               function that will be called on *every* entry
               to a code block.  Its return value, if not
               None, is a function that will be called at
               the start of each executed line of code.
               (Actually, the function must return itself
               in order to continue tracing.)  The trace
               functions are called with three arguments:
               a pointer to the current frame, a string
               indicating why the function is called, and
               an argument which depends on the situation.
               The global trace function is also called
               whenever an exception is detected. */
            if (call_trace_protected(tstate->c_tracefunc,
                                     tstate->c_traceobj,
                                     tstate, f, PyTrace_CALL, Py_None)) {
                /* Trace function raised an error */
                goto exit_eval_frame;
            }
        }
        if (tstate->c_profilefunc != NULL) {
            /* Similar for c_profilefunc, except it needn't
               return itself and isn't called for "line" events */
            if (call_trace_protected(tstate->c_profilefunc,
                                     tstate->c_profileobj,
                                     tstate, f, PyTrace_CALL, Py_None)) {
                /* Profile function raised an error */
                goto exit_eval_frame;
            }
        }
    }

    if (PyDTrace_FUNCTION_ENTRY_ENABLED())
        dtrace_function_entry(f);

    co = f->f_code;
    names = co->co_names;
    consts = co->co_consts;
    fastlocals = f->f_localsplus;
    freevars = f->f_localsplus + co->co_nlocals;
    assert(PyBytes_Check(co->co_code));
    assert(PyBytes_GET_SIZE(co->co_code) <= INT_MAX);
    assert(PyBytes_GET_SIZE(co->co_code) % sizeof(_Py_CODEUNIT) == 0);
    assert(_Py_IS_ALIGNED(PyBytes_AS_STRING(co->co_code), sizeof(_Py_CODEUNIT)));
    first_instr = (_Py_CODEUNIT *) PyBytes_AS_STRING(co->co_code);
    /*
       f->f_lasti refers to the index of the last instruction,
       unless it's -1 in which case next_instr should be first_instr.

       YIELD_FROM sets f_lasti to itself, in order to repeatedly yield
       multiple values.

       When the PREDICT() macros are enabled, some opcode pairs follow in
       direct succession without updating f->f_lasti.  A successful
       prediction effectively links the two codes together as if they
       were a single new opcode; accordingly, f->f_lasti will point to
       the first code in the pair (for instance, GET_ITER followed by
       FOR_ITER is effectively a single opcode and f->f_lasti will point
       to the beginning of the combined pair.)
    */
    assert(f->f_lasti >= -1);
    next_instr = first_instr;
    if (f->f_lasti >= 0) {
        assert(f->f_lasti % sizeof(_Py_CODEUNIT) == 0);
        next_instr += f->f_lasti / sizeof(_Py_CODEUNIT) + 1;
    }
    stack_pointer = f->f_stacktop;
    assert(stack_pointer != NULL);
    f->f_stacktop = NULL;       /* remains NULL unless yield suspends frame */
    f->f_executing = 1;

#ifdef LLTRACE
    lltrace = _PyDict_GetItemId(f->f_globals, &PyId___ltrace__) != NULL;
#endif

    why = WHY_NOT;

    if (throwflag) /* support for generator.throw() */
        goto error;

#ifdef Py_DEBUG
    /* PyEval_EvalFrameEx() must not be called with an exception set,
       because it can clear it (directly or indirectly) and so the
       caller loses its exception */
    assert(!PyErr_Occurred());
#endif
    for (;;) {
        assert(stack_pointer >= f->f_valuestack); /* else underflow */
        assert(STACK_LEVEL() <= co->co_stacksize);  /* else overflow */
        assert(!PyErr_Occurred());

        /* Do periodic things.  Doing this every time through
           the loop would add too much overhead, so we do it
           only every Nth instruction.  We also do it if
           ``pendingcalls_to_do'' is set, i.e. when an asynchronous
           event needs attention (e.g. a signal handler or
           async I/O handler); see Py_AddPendingCall() and
           Py_MakePendingCalls() above. */

        if (_Py_atomic_load_relaxed(&_PyRuntime.ceval.eval_breaker)) {
            opcode = _Py_OPCODE(*next_instr);
            if (opcode == SETUP_FINALLY ||
                opcode == SETUP_WITH ||
                opcode == BEFORE_ASYNC_WITH ||
                opcode == YIELD_FROM) {
                /* Few cases where we skip running signal handlers and other
                   pending calls:
                   - If we're about to enter the 'with:'. It will prevent
                     emitting a resource warning in the common idiom
                     'with open(path) as file:'.
                   - If we're about to enter the 'async with:'.
                   - If we're about to enter the 'try:' of a try/finally (not
                     *very* useful, but might help in some cases and it's
                     traditional)
                   - If we're resuming a chain of nested 'yield from' or
                     'await' calls, then each frame is parked with YIELD_FROM
                     as its next opcode. If the user hit control-C we want to
                     wait until we've reached the innermost frame before
                     running the signal handler and raising KeyboardInterrupt
                     (see bpo-30039).
                */
                goto fast_next_opcode;
            }
            if (_Py_atomic_load_relaxed(
                        &_PyRuntime.ceval.pending.calls_to_do))
            {
                if (Py_MakePendingCalls() < 0)
                    goto error;
            }
            if (_Py_atomic_load_relaxed(
                        &_PyRuntime.ceval.gil_drop_request))
            {
                /* Give another thread a chance */
                if (PyThreadState_Swap(NULL) != tstate)
                    Py_FatalError("ceval: tstate mix-up");
                drop_gil(tstate);

                /* Other threads may run now */

                take_gil(tstate);

                /* Check if we should make a quick exit. */
                if (_Py_IsFinalizing() &&
                    !_Py_CURRENTLY_FINALIZING(tstate))
                {
                    drop_gil(tstate);
                    PyThread_exit_thread();
                }

                if (PyThreadState_Swap(tstate) != NULL)
                    Py_FatalError("ceval: orphan tstate");
            }
            /* Check for asynchronous exceptions. */
            if (tstate->async_exc != NULL) {
                PyObject *exc = tstate->async_exc;
                tstate->async_exc = NULL;
                UNSIGNAL_ASYNC_EXC();