OILS / core / process.py View on Github | oils.pub

2182 lines, 1031 significant
# Copyright 2016 Andy Chu. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
process.py - Launch processes and manipulate file descriptors.
"""
10from __future__ import print_function
11
12from errno import EACCES, EBADF, ECHILD, EINTR, ENOENT, ENOEXEC, EEXIST
13import fcntl as fcntl_
14from fcntl import F_DUPFD, F_GETFD, F_SETFD, FD_CLOEXEC
15from signal import (SIG_DFL, SIG_IGN, SIGINT, SIGPIPE, SIGQUIT, SIGTSTP,
16 SIGTTOU, SIGTTIN, SIGWINCH)
17
18from _devbuild.gen.id_kind_asdl import Id
19from _devbuild.gen.runtime_asdl import (job_state_e, job_state_t,
20 job_state_str, wait_status,
21 wait_status_t, RedirValue,
22 redirect_arg, redirect_arg_e, trace,
23 trace_t)
24from _devbuild.gen.syntax_asdl import (
25 loc_t,
26 redir_loc,
27 redir_loc_e,
28 redir_loc_t,
29)
30from _devbuild.gen.value_asdl import (value, value_e)
31from core import dev
32from core import error
33from core.error import e_die
34from core import pyutil
35from core import pyos
36from core import state
37from display import ui
38from core import util
39from data_lang import j8_lite
40from frontend import location
41from frontend import match
42from mycpp import iolib
43from mycpp import mylib
44from mycpp.mylib import log, print_stderr, probe, tagswitch, iteritems
45
46import posix_ as posix
47from posix_ import (
48 # translated by mycpp and directly called! No wrapper!
49 WIFSIGNALED,
50 WIFEXITED,
51 WIFSTOPPED,
52 WEXITSTATUS,
53 WSTOPSIG,
54 WTERMSIG,
55 WNOHANG,
56 O_APPEND,
57 O_CREAT,
58 O_EXCL,
59 O_NONBLOCK,
60 O_NOCTTY,
61 O_RDONLY,
62 O_RDWR,
63 O_WRONLY,
64 O_TRUNC,
65)
66
67from typing import IO, List, Tuple, Dict, Optional, Any, cast, TYPE_CHECKING
68
69if TYPE_CHECKING:
70 from _devbuild.gen.runtime_asdl import cmd_value
71 from _devbuild.gen.syntax_asdl import command_t
72 from builtin import trap_osh
73 from core import optview
74 from core.util import _DebugFile
75 from osh.cmd_eval import CommandEvaluator
76
# Sentinel meaning "no file descriptor" / "descriptor operation failed"
NO_FD = -1

# Minimum file descriptor that the shell can use. Other descriptors can be
# directly used by user programs, e.g. exec 9>&1
#
# Oils uses 100 because users are allowed TWO digits in frontend/lexer_def.py.
# This is a compromise between bash (unlimited, but requires crazy
# bookkeeping), and dash/zsh (10) and mksh (24)
_SHELL_MIN_FD = 100

# Style for 'jobs' builtin
STYLE_DEFAULT = 0
STYLE_LONG = 1
STYLE_PID_ONLY = 2

# To save on allocations in JobList::JobFromSpec()
CURRENT_JOB_SPECS = ['', '%', '%%', '%+']
94
95
class ctx_FileCloser(object):
    """Context manager that closes a file object when the scope exits."""

    def __init__(self, f):
        # type: (mylib.LineReader) -> None
        self.f = f

    def __enter__(self):
        # type: () -> None
        pass

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # type: (Any, Any, Any) -> None
        # Close unconditionally, whether or not an exception was raised.
        self.f.close()
109
110
def InitInteractiveShell(signal_safe):
    # type: (iolib.SignalSafe) -> None
    """Called when initializing an interactive shell.

    Installs the signal dispositions an interactive shell needs; the
    signal_safe arg is currently unused here (registration goes through
    module-level iolib calls).
    """

    # The shell itself should ignore Ctrl-\.
    iolib.sigaction(SIGQUIT, SIG_IGN)

    # This prevents Ctrl-Z from suspending OSH in interactive mode.
    iolib.sigaction(SIGTSTP, SIG_IGN)

    # More signals from
    # https://www.gnu.org/software/libc/manual/html_node/Initializing-the-Shell.html
    # (but not SIGCHLD)
    iolib.sigaction(SIGTTOU, SIG_IGN)
    iolib.sigaction(SIGTTIN, SIG_IGN)

    # Register a callback to receive terminal width changes.
    # NOTE: In line_input.c, we turned off rl_catch_sigwinch.

    # This is ALWAYS on, which means that it can cause EINTR, and wait() and
    # read() have to handle it
    iolib.RegisterSignalInterest(SIGWINCH)
133
134
def SaveFd(fd):
    # type: (int) -> int
    """Copy fd to a new descriptor numbered >= _SHELL_MIN_FD (F_DUPFD).

    The original fd is left open; callers close it if they want a move.
    Raises IOError/OSError if fd is invalid.
    """
    saved = fcntl_.fcntl(fd, F_DUPFD, _SHELL_MIN_FD)  # type: int
    return saved
139
140
class _RedirFrame(object):
    """Bookkeeping for one saved descriptor, so a redirect can be undone."""

    def __init__(self, saved_fd, orig_fd, forget):
        # type: (int, int, bool) -> None
        self.saved_fd = saved_fd  # high-range copy of orig_fd, or NO_FD
        self.orig_fd = orig_fd  # the descriptor the redirect touched
        self.forget = forget  # whether Forget() should close saved_fd
148
149
class _FdFrame(object):
    """One level of descriptor state, pushed/popped as a unit by FdState.

    Attributes:
      saved: _RedirFrame entries to undo (in reverse) on Pop()
      need_wait: here doc writer Processes to Wait() on when popping
    """

    def __init__(self):
        # type: () -> None
        self.saved = []  # type: List[_RedirFrame]
        self.need_wait = []  # type: List[Process]

    def Forget(self):
        # type: () -> None
        """For exec 1>&2."""
        # Close the saved copies instead of restoring them, which makes the
        # redirects permanent; then drop all bookkeeping.
        for rf in reversed(self.saved):
            if rf.saved_fd != NO_FD and rf.forget:
                posix.close(rf.saved_fd)

        del self.saved[:]  # like list.clear() in Python 3.3
        del self.need_wait[:]

    def __repr__(self):
        # type: () -> str
        return '<_FdFrame %s>' % self.saved
170
171
class FdState(object):
    """File descriptor state for the current process.

    For example, you can do 'myfunc > out.txt' without forking. Child
    processes inherit our state.
    """

    def __init__(
            self,
            errfmt,  # type: ui.ErrorFormatter
            job_control,  # type: JobControl
            job_list,  # type: JobList
            mem,  # type: state.Mem
            tracer,  # type: Optional[dev.Tracer]
            waiter,  # type: Optional[Waiter]
            exec_opts,  # type: optview.Exec
    ):
        # type: (...) -> None
        """
        Args:
          errfmt: for errors
          job_list: For keeping track of _HereDocWriterThunk
        """
        self.errfmt = errfmt
        self.job_control = job_control
        self.job_list = job_list
        self.cur_frame = _FdFrame()  # for the top level
        self.stack = [self.cur_frame]
        self.mem = mem
        self.tracer = tracer
        self.waiter = waiter
        self.exec_opts = exec_opts

    def Open(self, path):
        # type: (str) -> mylib.LineReader
        """Opens a path for read, but moves it out of the reserved 3-9 fd
        range.

        Returns:
          A Python file object. The caller is responsible for Close().

        Raises:
          IOError or OSError if the path can't be found. (This is Python-induced wart)
        """
        fd_mode = O_RDONLY
        f = self._Open(path, 'r', fd_mode)

        # Hacky downcast
        return cast('mylib.LineReader', f)

    # used for util.DebugFile
    def OpenForWrite(self, path):
        # type: (str) -> mylib.Writer
        """Open path for writing (creating it if needed), above the fd range
        reserved for user redirects."""
        fd_mode = O_CREAT | O_RDWR
        f = self._Open(path, 'w', fd_mode)

        # Hacky downcast
        return cast('mylib.Writer', f)

    def _Open(self, path, c_mode, fd_mode):
        # type: (str, str, int) -> IO[str]
        """Open path, then move the descriptor to >= _SHELL_MIN_FD.

        May raise IOError/OSError from open() or fdopen().
        """
        fd = posix.open(path, fd_mode, 0o666)  # may raise OSError

        # Immediately move it to a new location
        new_fd = SaveFd(fd)
        posix.close(fd)

        # Return a Python file handle
        # NOTE(review): if fdopen() raises here, new_fd is never closed —
        # confirm whether that leak matters in practice.
        f = posix.fdopen(new_fd, c_mode)  # may raise IOError
        return f

    def _WriteFdToMem(self, fd_name, fd):
        # type: (str, int) -> None
        """Store fd as a string in shell variable fd_name (for exec {fd}>...)."""
        if self.mem:
            # setvar, not setref
            state.OshLanguageSetValue(self.mem, location.LName(fd_name),
                                      value.Str(str(fd)))

    def _ReadFdFromMem(self, fd_name):
        # type: (str) -> int
        """Read a descriptor number back out of shell variable fd_name.

        Returns NO_FD if the variable isn't a string or isn't an integer.
        """
        val = self.mem.GetValue(fd_name)
        if val.tag() == value_e.Str:
            try:
                return int(cast(value.Str, val).s)
            except ValueError:
                return NO_FD
        return NO_FD

    def _PushSave(self, fd):
        # type: (int) -> bool
        """Save fd to a new location and remember to restore it later.

        Returns True if a saved copy was made; False if fd wasn't open
        (EBADF), in which case we only remember to close it on Pop().
        """
        #log('---- _PushSave %s', fd)
        ok = True
        try:
            new_fd = SaveFd(fd)
        except (IOError, OSError) as e:
            ok = False
            # Example program that causes this error: exec 4>&1. Descriptor 4 isn't
            # open.
            # This seems to be ignored in dash too in savefd()?
            if e.errno != EBADF:
                raise
        if ok:
            posix.close(fd)
            # Don't leak the saved copy into child processes.
            fcntl_.fcntl(new_fd, F_SETFD, FD_CLOEXEC)
            self.cur_frame.saved.append(_RedirFrame(new_fd, fd, True))
        else:
            # if we got EBADF, we still need to close the original on Pop()
            self._PushClose(fd)

        return ok

    def _PushDup(self, fd1, blame_loc):
        # type: (int, redir_loc_t) -> int
        """Save fd2 in a higher range, and dup fd1 onto fd2.

        Returns:
          The new descriptor, or NO_FD if F_DUPFD/dup2 failed in a way we
          report (other failures re-raise).
        """
        UP_loc = blame_loc
        if blame_loc.tag() == redir_loc_e.VarName:
            # {fd}>file case: allocate a high descriptor and store its number
            # in the named shell variable.
            fd2_name = cast(redir_loc.VarName, UP_loc).name
            try:
                # F_DUPFD: GREATER than range
                new_fd = fcntl_.fcntl(fd1, F_DUPFD, _SHELL_MIN_FD)  # type: int
            except (IOError, OSError) as e:
                if e.errno == EBADF:
                    print_stderr('F_DUPFD fd %d: %s' %
                                 (fd1, pyutil.strerror(e)))
                    return NO_FD
                else:
                    raise  # this redirect failed

            self._WriteFdToMem(fd2_name, new_fd)

        elif blame_loc.tag() == redir_loc_e.Fd:
            # N>file case: dup onto the literal descriptor number.
            fd2 = cast(redir_loc.Fd, UP_loc).fd

            if fd1 == fd2:
                # The user could have asked for it to be open on descriptor 3, but open()
                # already returned 3, e.g. echo 3>out.txt
                return NO_FD

            # Check the validity of fd1 before _PushSave(fd2)
            try:
                fcntl_.fcntl(fd1, F_GETFD)
            except (IOError, OSError) as e:
                print_stderr('F_GETFD fd %d: %s' % (fd1, pyutil.strerror(e)))
                raise

            need_restore = self._PushSave(fd2)

            #log('==== dup2 %s %s\n' % (fd1, fd2))
            try:
                posix.dup2(fd1, fd2)
            except (IOError, OSError) as e:
                # bash/dash give this error too, e.g. for 'echo hi 1>&3'
                print_stderr('dup2(%d, %d): %s' %
                             (fd1, fd2, pyutil.strerror(e)))

                # Restore and return error
                if need_restore:
                    rf = self.cur_frame.saved.pop()
                    posix.dup2(rf.saved_fd, rf.orig_fd)
                    posix.close(rf.saved_fd)

                raise  # this redirect failed

            new_fd = fd2

        else:
            raise AssertionError()

        return new_fd

    def _PushCloseFd(self, blame_loc):
        # type: (redir_loc_t) -> bool
        """For 2>&-"""
        # exec {fd}>&- means close the named descriptor

        UP_loc = blame_loc
        if blame_loc.tag() == redir_loc_e.VarName:
            fd_name = cast(redir_loc.VarName, UP_loc).name
            fd = self._ReadFdFromMem(fd_name)
            if fd == NO_FD:
                return False

        elif blame_loc.tag() == redir_loc_e.Fd:
            fd = cast(redir_loc.Fd, UP_loc).fd

        else:
            raise AssertionError()

        # "Closing" is implemented by saving the fd away; Pop() restores it.
        self._PushSave(fd)

        return True

    def _PushClose(self, fd):
        # type: (int) -> None
        """Remember to close fd on Pop() (no saved copy is made)."""
        self.cur_frame.saved.append(_RedirFrame(NO_FD, fd, False))

    def _PushWait(self, proc):
        # type: (Process) -> None
        """Remember a here doc writer process to Wait() on during Pop()."""
        self.cur_frame.need_wait.append(proc)

    def _ApplyRedirect(self, r):
        # type: (RedirValue) -> None
        """Apply one redirect to this process's descriptor table.

        Raises IOError/OSError if the redirect fails; the caller (Push)
        reports and unwinds.
        """
        arg = r.arg
        UP_arg = arg
        with tagswitch(arg) as case:

            if case(redirect_arg_e.Path):
                arg = cast(redirect_arg.Path, UP_arg)
                # noclobber flag is OR'd with other flags when allowed
                noclobber_mode = O_EXCL if self.exec_opts.noclobber() else 0
                if r.op_id in (Id.Redir_Great, Id.Redir_AndGreat):  # >   &>
                    # NOTE: This is different than >| because it respects noclobber, but
                    # that option is almost never used.  See test/wild.sh.
                    mode = O_CREAT | O_WRONLY | O_TRUNC | noclobber_mode
                elif r.op_id == Id.Redir_Clobber:  # >|
                    mode = O_CREAT | O_WRONLY | O_TRUNC
                elif r.op_id in (Id.Redir_DGreat,
                                 Id.Redir_AndDGreat):  # >>   &>>
                    mode = O_CREAT | O_WRONLY | O_APPEND | noclobber_mode
                elif r.op_id == Id.Redir_Less:  # <
                    mode = O_RDONLY
                elif r.op_id == Id.Redir_LessGreat:  # <>
                    mode = O_CREAT | O_RDWR
                else:
                    raise NotImplementedError(r.op_id)

                # NOTE: 0666 is affected by umask, all shells use it.
                try:
                    open_fd = posix.open(arg.filename, mode, 0o666)
                except (IOError, OSError) as e:
                    if e.errno == EEXIST and self.exec_opts.noclobber():
                        extra = ' (noclobber)'
                    else:
                        extra = ''
                    self.errfmt.Print_(
                        "Can't open %r: %s%s" %
                        (arg.filename, pyutil.strerror(e), extra),
                        blame_loc=r.op_loc)
                    raise  # redirect failed

                new_fd = self._PushDup(open_fd, r.loc)
                if new_fd != NO_FD:
                    posix.close(open_fd)

                # Now handle &> and &>> and their variants.  These pairs are the same:
                #
                #   stdout_stderr.py &> out-err.txt
                #   stdout_stderr.py > out-err.txt 2>&1
                #
                #   stdout_stderr.py 3&> out-err.txt
                #   stdout_stderr.py 3> out-err.txt 2>&3
                #
                # Ditto for {fd}> and {fd}&>

                if r.op_id in (Id.Redir_AndGreat, Id.Redir_AndDGreat):
                    self._PushDup(new_fd, redir_loc.Fd(2))

            elif case(redirect_arg_e.CopyFd):  # e.g. echo hi 1>&2
                arg = cast(redirect_arg.CopyFd, UP_arg)

                if r.op_id == Id.Redir_GreatAnd:  # 1>&2
                    self._PushDup(arg.target_fd, r.loc)

                elif r.op_id == Id.Redir_LessAnd:  # 0<&5
                    # The only difference between >& and <& is the default file
                    # descriptor argument.
                    self._PushDup(arg.target_fd, r.loc)

                else:
                    raise NotImplementedError()

            elif case(redirect_arg_e.MoveFd):  # e.g. echo hi 5>&6-
                arg = cast(redirect_arg.MoveFd, UP_arg)
                new_fd = self._PushDup(arg.target_fd, r.loc)
                if new_fd != NO_FD:
                    # "Move" means the source fd is closed after the dup.
                    posix.close(arg.target_fd)

                    UP_loc = r.loc
                    if r.loc.tag() == redir_loc_e.Fd:
                        fd = cast(redir_loc.Fd, UP_loc).fd
                    else:
                        fd = NO_FD

                    self.cur_frame.saved.append(_RedirFrame(new_fd, fd, False))

            elif case(redirect_arg_e.CloseFd):  # e.g. echo hi 5>&-
                self._PushCloseFd(r.loc)

            elif case(redirect_arg_e.HereDoc):
                arg = cast(redirect_arg.HereDoc, UP_arg)

                # NOTE: Do these descriptors have to be moved out of the range 0-9?
                read_fd, write_fd = posix.pipe()

                self._PushDup(read_fd, r.loc)  # stdin is now the pipe

                # We can't close like we do in the filename case above?  The writer can
                # get a "broken pipe".
                self._PushClose(read_fd)

                thunk = _HereDocWriterThunk(write_fd, arg.body)

                # Use PIPE_SIZE to save a process in the case of small here
                # docs, which are the common case.  (dash does this.)

                # Note: could instrument this to see how often it happens.
                # Though strace -ff can also work.
                start_process = len(arg.body) > 4096
                #start_process = True

                if start_process:
                    here_proc = Process(thunk, self.job_control, self.job_list,
                                        self.tracer)

                    # NOTE: we could close the read pipe here, but it doesn't really
                    # matter because we control the code.
                    here_proc.StartProcess(trace.HereDoc)
                    #log('Started %s as %d', here_proc, pid)
                    self._PushWait(here_proc)

                    # Now that we've started the child, close it in the parent.
                    posix.close(write_fd)

                else:
                    posix.write(write_fd, arg.body)
                    posix.close(write_fd)

    def Push(self, redirects, err_out):
        # type: (List[RedirValue], List[error.IOError_OSError]) -> None
        """Apply a group of redirects and remember to undo them."""

        #log('> fd_state.Push %s', redirects)
        new_frame = _FdFrame()
        self.stack.append(new_frame)
        self.cur_frame = new_frame

        for r in redirects:
            #log('apply %s', r)
            with ui.ctx_Location(self.errfmt, r.op_loc):
                try:
                    self._ApplyRedirect(r)
                except (IOError, OSError) as e:
                    err_out.append(e)
                    # This can fail too
                    self.Pop(err_out)
                    return  # for bad descriptor, etc.

    def PushStdinFromPipe(self, r):
        # type: (int) -> bool
        """Save the current stdin and make it come from descriptor 'r'.

        'r' is typically the read-end of a pipe.  For 'lastpipe'/ZSH
        semantics of

        echo foo | read line; echo $line
        """
        new_frame = _FdFrame()
        self.stack.append(new_frame)
        self.cur_frame = new_frame

        self._PushDup(r, redir_loc.Fd(0))
        return True

    def Pop(self, err_out):
        # type: (List[error.IOError_OSError]) -> None
        """Undo one frame of redirects, restoring or closing descriptors.

        Note: on the first close/dup2 error, the error is appended to
        err_out and we return early — the remaining frames in self.saved
        are NOT restored, and need_wait processes are not waited on.
        """
        frame = self.stack.pop()
        #log('< Pop %s', frame)
        for rf in reversed(frame.saved):
            if rf.saved_fd == NO_FD:
                #log('Close %d', orig)
                try:
                    posix.close(rf.orig_fd)
                except (IOError, OSError) as e:
                    err_out.append(e)
                    log('Error closing descriptor %d: %s', rf.orig_fd,
                        pyutil.strerror(e))
                    return
            else:
                try:
                    posix.dup2(rf.saved_fd, rf.orig_fd)
                except (IOError, OSError) as e:
                    err_out.append(e)
                    log('dup2(%d, %d) error: %s', rf.saved_fd, rf.orig_fd,
                        pyutil.strerror(e))
                    #log('fd state:')
                    #posix.system('ls -l /proc/%s/fd' % posix.getpid())
                    return
                posix.close(rf.saved_fd)
                #log('dup2 %s %s', saved, orig)

        # Wait for here doc processes to finish.
        for proc in frame.need_wait:
            unused_status = proc.Wait(self.waiter)

    def MakePermanent(self):
        # type: () -> None
        """Make the current frame's redirects permanent (for 'exec 1>&2')."""
        self.cur_frame.Forget()
573
574
class ChildStateChange(object):
    """Interface for per-child state manipulations applied around fork()."""

    def __init__(self):
        # type: () -> None
        """Empty constructor for mycpp."""
        pass

    def Apply(self):
        # type: () -> None
        """Run in the CHILD process after fork(), before the thunk runs."""
        raise NotImplementedError()

    def ApplyFromParent(self, proc):
        # type: (Process) -> None
        """Noop for all state changes other than SetPgid for mycpp."""
        pass
590
591
class StdinFromPipe(ChildStateChange):
    """In the child: make stdin (fd 0) read from a pipe's read end."""

    def __init__(self, pipe_read_fd, w):
        # type: (int, int) -> None
        self.r = pipe_read_fd
        self.w = w

    def __repr__(self):
        # type: () -> str
        return '<StdinFromPipe %d %d>' % (self.r, self.w)

    def Apply(self):
        # type: () -> None
        posix.dup2(self.r, 0)
        # The original read end is redundant once it's duped onto stdin.
        posix.close(self.r)

        # This side of the pipeline only reads; drop the write end so EOF
        # propagates correctly.
        posix.close(self.w)
        #log('child CLOSE w %d pid=%d', self.w, posix.getpid())
609 #log('child CLOSE w %d pid=%d', self.w, posix.getpid())
610
611
class StdoutToPipe(ChildStateChange):
    """In the child: make stdout (fd 1) write into a pipe's write end."""

    def __init__(self, r, pipe_write_fd):
        # type: (int, int) -> None
        self.r = r
        self.w = pipe_write_fd

    def __repr__(self):
        # type: () -> str
        return '<StdoutToPipe %d %d>' % (self.r, self.w)

    def Apply(self):
        # type: () -> None
        posix.dup2(self.w, 1)
        # The original write end is redundant once it's duped onto stdout.
        posix.close(self.w)

        # This side of the pipeline only writes; drop the read end so the
        # reader sees EOF when all writers exit.
        posix.close(self.r)
        #log('child CLOSE r %d pid=%d', self.r, posix.getpid())
630
631
class StderrToPipe(ChildStateChange):
    """In the child: make stderr (fd 2) write into a pipe's write end."""

    def __init__(self, r, pipe_write_fd):
        # type: (int, int) -> None
        self.r = r
        self.w = pipe_write_fd

    def __repr__(self):
        # type: () -> str
        return '<StderrToPipe %d %d>' % (self.r, self.w)

    def Apply(self):
        # type: () -> None
        posix.dup2(self.w, 2)
        # The original write end is redundant once it's duped onto stderr.
        posix.close(self.w)

        # This side of the pipeline only writes; drop the read end so the
        # reader sees EOF when all writers exit.
        posix.close(self.r)
        #log('child CLOSE r %d pid=%d', self.r, posix.getpid())
650
651
# Sentinel: no process group has been assigned yet
INVALID_PGID = -1
# argument to setpgid() that means the process is its own leader
OWN_LEADER = 0
655
656
class SetPgid(ChildStateChange):
    """Put the forked child into a specific process group (job control)."""

    def __init__(self, pgid, tracer):
        # type: (int, dev.Tracer) -> None
        self.pgid = pgid  # target group, or OWN_LEADER (0) for its own group
        self.tracer = tracer

    def Apply(self):
        # type: () -> None
        # Runs in the child.  Failure is traced but not fatal.
        try:
            posix.setpgid(0, self.pgid)
        except (IOError, OSError) as e:
            self.tracer.OtherMessage(
                'osh: child %d failed to set its process group to %d: %s' %
                (posix.getpid(), self.pgid, pyutil.strerror(e)))

    def ApplyFromParent(self, proc):
        # type: (Process) -> None
        # Also runs in the parent, to avoid racing with tcsetpgrp() — see
        # Process.StartProcess().
        try:
            posix.setpgid(proc.pid, self.pgid)
        except (IOError, OSError) as e:
            self.tracer.OtherMessage(
                'osh: parent failed to set process group for PID %d to %d: %s'
                % (proc.pid, self.pgid, pyutil.strerror(e)))
681
682
class ExternalProgram(object):
    """The capability to execute an external program like 'ls'."""

    def __init__(
            self,
            hijack_shebang,  # type: str
            fd_state,  # type: FdState
            errfmt,  # type: ui.ErrorFormatter
            debug_f,  # type: _DebugFile
    ):
        # type: (...) -> None
        """
        Args:
          hijack_shebang: The path of an interpreter to run instead of the one
            specified in the shebang line.  May be empty.
        """
        self.hijack_shebang = hijack_shebang
        self.fd_state = fd_state
        self.errfmt = errfmt
        self.debug_f = debug_f

    def Exec(self, argv0_path, cmd_val, environ):
        # type: (str, cmd_value.Argv, Dict[str, str]) -> None
        """Execute a program and exit this process.

        Called by: ls / exec ls / ( ls / )

        Does not return: _Exec() either replaces the process image via
        execve() or calls posix._exit().
        """
        probe('process', 'ExternalProgram_Exec', argv0_path)
        self._Exec(argv0_path, cmd_val.argv, cmd_val.arg_locs[0], environ,
                   True)
        assert False, "This line should never execute"  # NO RETURN

    def _Exec(self, argv0_path, argv, argv0_loc, environ, should_retry):
        # type: (str, List[str], loc_t, Dict[str, str], bool) -> None
        """execve() the program, optionally hijacking its shebang.

        On ENOEXEC, retries once through /bin/sh (should_retry guards
        infinite recursion).  Exits with 126/127 per POSIX on failure.
        """
        if len(self.hijack_shebang):
            opened = True
            try:
                f = self.fd_state.Open(argv0_path)
            except (IOError, OSError) as e:
                opened = False

            if opened:
                with ctx_FileCloser(f):
                    # Test if the shebang looks like a shell.  TODO: The file might be
                    # binary with no newlines, so read 80 bytes instead of readline().

                    #line = f.read(80)  # type: ignore  # TODO: fix this
                    line = f.readline()

                    if match.ShouldHijack(line):
                        h_argv = [self.hijack_shebang, argv0_path]
                        h_argv.extend(argv[1:])
                        argv = h_argv
                        argv0_path = self.hijack_shebang
                        self.debug_f.writeln('Hijacked: %s' % argv0_path)
                    else:
                        #self.debug_f.log('Not hijacking %s (%r)', argv, line)
                        pass

        try:
            posix.execve(argv0_path, argv, environ)
        except (IOError, OSError) as e:
            # Run with /bin/sh when ENOEXEC error (no shebang).  All shells do this.
            if e.errno == ENOEXEC and should_retry:
                new_argv = ['/bin/sh', argv0_path]
                new_argv.extend(argv[1:])
                self._Exec('/bin/sh', new_argv, argv0_loc, environ, False)
                # NO RETURN

            # Would be nice: when the path is relative and ENOENT: print PWD and do
            # spelling correction?

            self.errfmt.Print_(
                "Can't execute %r: %s" % (argv0_path, pyutil.strerror(e)),
                argv0_loc)

            # POSIX mentions 126 and 127 for two specific errors.  The rest are
            # unspecified.
            #
            # http://pubs.opengroup.org/onlinepubs/9699919799.2016edition/utilities/V3_chap02.html#tag_18_08_02
            if e.errno == EACCES:
                status = 126
            elif e.errno == ENOENT:
                # TODO: most shells print 'command not found', rather than strerror()
                # == "No such file or directory".  That's better because it's at the
                # end of the path search, and we're never searching for a directory.
                status = 127
            else:
                # dash uses 2, but we use that for parse errors.  This seems to be
                # consistent with mksh and zsh.
                status = 127

            posix._exit(status)
            # NO RETURN
777
778
class Thunk(object):
    """Abstract base class for things runnable in another process."""

    def __init__(self):
        # type: () -> None
        """Empty constructor for mycpp."""
        pass

    def Run(self):
        # type: () -> None
        """Run the body of the thunk.

        Implementations do not return normally: they either exec() another
        program or call posix._exit() with a status code.
        """
        raise NotImplementedError()

    def UserString(self):
        # type: () -> str
        """Display for the 'jobs' list."""
        raise NotImplementedError()

    def __repr__(self):
        # type: () -> str
        return self.UserString()
800
801
class ExternalThunk(Thunk):
    """An external executable."""

    def __init__(self, ext_prog, argv0_path, cmd_val, environ):
        # type: (ExternalProgram, str, cmd_value.Argv, Dict[str, str]) -> None
        self.ext_prog = ext_prog
        self.argv0_path = argv0_path
        self.cmd_val = cmd_val
        self.environ = environ

    def UserString(self):
        # type: () -> str

        # NOTE: This is the format the Tracer uses.
        # bash displays        sleep $n & (code)
        # but OSH displays     sleep 1 &  (argv array)
        # We could switch the former but I'm not sure it's necessary.
        tmp = [j8_lite.MaybeShellEncode(a) for a in self.cmd_val.argv]
        return '[process] %s' % ' '.join(tmp)

    def Run(self):
        # type: () -> None
        """An ExternalThunk is run in parent for the exec builtin."""
        # Exec() does not return.
        self.ext_prog.Exec(self.argv0_path, self.cmd_val, self.environ)
826
827
class SubProgramThunk(Thunk):
    """A subprogram that can be executed in another process."""

    def __init__(
            self,
            cmd_ev,  # type: CommandEvaluator
            node,  # type: command_t
            trap_state,  # type: trap_osh.TrapState
            multi_trace,  # type: dev.MultiTracer
            inherit_errexit,  # type: bool
            inherit_errtrace,  # type: bool
    ):
        # type: (...) -> None
        self.cmd_ev = cmd_ev
        self.node = node
        self.trap_state = trap_state
        self.multi_trace = multi_trace
        self.inherit_errexit = inherit_errexit  # for bash errexit compatibility
        self.inherit_errtrace = inherit_errtrace  # for bash errtrace compatibility

    def UserString(self):
        # type: () -> str

        # NOTE: These can be pieces of a pipeline, so they're arbitrary nodes.
        # TODO: Extract SPIDS from node to display source?  Note that
        # CompoundStatus also has locations of each pipeline component; see
        # Executor.RunPipeline()
        thunk_str = ui.CommandType(self.node)
        return '[subprog] %s' % thunk_str

    def Run(self):
        # type: () -> None
        """Evaluate self.node in this (child) process, then _exit()."""
        #self.errfmt.OneLineErrExit()  # don't quote code in child processes
        probe('process', 'SubProgramThunk_Run')

        # TODO: break circular dep.  Bit flags could go in ASDL or headers.
        from osh import cmd_eval

        # signal handlers aren't inherited
        self.trap_state.ClearForSubProgram(self.inherit_errtrace)

        # NOTE: may NOT return due to exec().
        if not self.inherit_errexit:
            self.cmd_ev.mutable_opts.DisableErrExit()
        try:
            # optimize to eliminate redundant subshells like ( echo hi ) | wc -l etc.
            self.cmd_ev.ExecuteAndCatch(
                self.node,
                cmd_eval.OptimizeSubshells | cmd_eval.MarkLastCommands)
            status = self.cmd_ev.LastStatus()
            # NOTE: We ignore the is_fatal return value.  The user should set -o
            # errexit so failures in subprocesses cause failures in the parent.
        except util.UserExit as e:
            status = e.status

        # Handle errors in a subshell.  These two cases are repeated from main()
        # and the core/completion.py hook.
        except KeyboardInterrupt:
            print('')
            status = 130  # 128 + 2
        except (IOError, OSError) as e:
            print_stderr('oils I/O error (subprogram): %s' %
                         pyutil.strerror(e))
            status = 2

        # If ProcessInit() doesn't turn off buffering, this is needed before
        # _exit()
        pyos.FlushStdout()

        self.multi_trace.WriteDumps()

        # We do NOT want to raise SystemExit here.  Otherwise dev.Tracer::Pop()
        # gets called in BOTH processes.
        # The crash dump seems to be unaffected.
        posix._exit(status)
903
904
class _HereDocWriterThunk(Thunk):
    """Write a here doc to one end of a pipe.

    May be be executed in either a child process or the main shell
    process.
    """

    def __init__(self, w, body_str):
        # type: (int, str) -> None
        self.w = w  # write end of the pipe
        self.body_str = body_str  # here doc contents

    def UserString(self):
        # type: () -> str

        # You can hit Ctrl-Z and the here doc writer will be suspended!  Other
        # shells don't have this problem because they use temp files!  That's a bit
        # unfortunate.
        return '[here doc writer]'

    def Run(self):
        # type: () -> None
        """do_exit: For small pipelines."""
        probe('process', 'HereDocWriterThunk_Run')
        #log('Writing %r', self.body_str)
        posix.write(self.w, self.body_str)
        #log('Wrote %r', self.body_str)
        posix.close(self.w)
        #log('Closed %d', self.w)

        # Only reached when run in a child process (see FdState._ApplyRedirect).
        posix._exit(0)
936
937
class Job(object):
    """Interface for both Process and Pipeline.

    They both can be put in the background and waited on.

    Confusing thing about pipelines in the background: They have TOO MANY NAMES.

    sleep 1 | sleep 2 &

    - The LAST PID is what's printed at the prompt.  This is $!, a PROCESS ID and
      not a JOB ID.
      # https://www.gnu.org/software/bash/manual/html_node/Special-Parameters.html#Special-Parameters
    - The process group leader (setpgid) is the FIRST PID.
    - It's also %1 or %+.  The last job started.
    """

    def __init__(self):
        # type: () -> None
        # Initial state with & or Ctrl-Z is Running.
        self.state = job_state_e.Running
        self.job_id = -1  # -1 means "not registered in the job list"
        self.in_background = False

    def DisplayJob(self, job_id, f, style):
        # type: (int, mylib.Writer, int) -> None
        """Write a 'jobs'-style line for this job to f (see STYLE_* constants)."""
        raise NotImplementedError()

    def State(self):
        # type: () -> job_state_t
        return self.state

    def ProcessGroupId(self):
        # type: () -> int
        """Return the process group ID associated with this job."""
        raise NotImplementedError()

    def PidForWait(self):
        # type: () -> int
        """Return the pid we can wait on."""
        raise NotImplementedError()

    def JobWait(self, waiter):
        # type: (Waiter) -> wait_status_t
        """Wait for this process/pipeline to be stopped or finished."""
        raise NotImplementedError()

    def SetBackground(self):
        # type: () -> None
        """Record that this job is running in the background."""
        self.in_background = True

    def SetForeground(self):
        # type: () -> None
        """Record that this job is running in the foreground."""
        self.in_background = False
993
994
995class Process(Job):
996 """A process to run.
997
998 TODO: Should we make it clear that this is a FOREGROUND process? A
999 background process is wrapped in a "job". It is unevaluated.
1000
1001 It provides an API to manipulate file descriptor state in parent and child.
1002 """
1003
    def __init__(self, thunk, job_control, job_list, tracer):
        # type: (Thunk, JobControl, JobList, dev.Tracer) -> None
        """
        Args:
          thunk: Thunk instance
          job_list: for process bookkeeping
        """
        Job.__init__(self)
        assert isinstance(thunk, Thunk), thunk
        self.thunk = thunk
        self.job_control = job_control
        self.job_list = job_list
        self.tracer = tracer
        self.exec_opts = tracer.exec_opts

        # For pipelines
        self.parent_pipeline = None  # type: Pipeline
        self.state_changes = []  # type: List[ChildStateChange]
        self.close_r = -1  # pipe fds to close in the parent; -1 means none
        self.close_w = -1

        self.pid = -1  # set by StartProcess(); -1 means not started
        self.status = -1  # exit status; -1 means not finished
1027
    def Init_ParentPipeline(self, pi):
        # type: (Pipeline) -> None
        """For updating PIPESTATUS."""
        self.parent_pipeline = pi
1032
    def __repr__(self):
        # type: () -> str

        # note: be wary of infinite mutual recursion
        #s = ' %s' % self.parent_pipeline if self.parent_pipeline else ''
        #return '<Process %s%s>' % (self.thunk, s)
        return '<Process pid=%d state=%s %s>' % (
            self.pid, _JobStateStr(self.state), self.thunk)
1041
    def ProcessGroupId(self):
        # type: () -> int
        """Returns the group ID of this process."""
        # This should only ever be called AFTER the process has started
        assert self.pid != -1
        if self.parent_pipeline:
            # XXX: Maybe we should die here instead?  Unclear if this branch
            # should even be reachable with the current builtins.
            return self.parent_pipeline.ProcessGroupId()

        return self.pid
1053
    def PidForWait(self):
        # type: () -> int
        """Return the pid we can wait on."""
        # Only valid after StartProcess() has set self.pid.
        assert self.pid != -1
        return self.pid
1059
    def DisplayJob(self, job_id, f, style):
        # type: (int, mylib.Writer, int) -> None
        """Write one line describing this process, for the 'jobs' builtin.

        job_id of -1 means "no job ID" and prints blank padding instead.
        """
        if job_id == -1:
            job_id_str = '  '
        else:
            job_id_str = '%%%d' % job_id  # %% is a literal '%'
        if style == STYLE_PID_ONLY:
            f.write('%d\n' % self.pid)
        else:
            f.write('%s %d %7s ' %
                    (job_id_str, self.pid, _JobStateStr(self.state)))
            f.write(self.thunk.UserString())
            f.write('\n')
1073
    def AddStateChange(self, s):
        # type: (ChildStateChange) -> None
        """Queue a state change to apply in the child after fork()."""
        self.state_changes.append(s)
1077
    def AddPipeToClose(self, r, w):
        # type: (int, int) -> None
        """Record a pipe (both ends) for MaybeClosePipe() to close later."""
        self.close_r = r
        self.close_w = w
1082
    def MaybeClosePipe(self):
        # type: () -> None
        """Close the pipe recorded by AddPipeToClose(), if any."""
        if self.close_r != -1:
            posix.close(self.close_r)
            posix.close(self.close_w)
1088
    def StartProcess(self, why):
        # type: (trace_t) -> int
        """Start this process with fork(), handling redirects.

        Returns the child's pid (in the parent); the child never returns
        because self.thunk.Run() execs or exits.
        """
        pid = posix.fork()
        if pid < 0:
            # When does this happen?
            e_die('Fatal error in posix.fork()')

        elif pid == 0:  # child
            # Note: this happens in BOTH interactive and non-interactive shells.
            # We technically don't need to do most of it in non-interactive, since we
            # did not change state in InitInteractiveShell().

            for st in self.state_changes:
                st.Apply()

            # Python sets SIGPIPE handler to SIG_IGN by default.  Child processes
            # shouldn't have this.
            # https://docs.python.org/2/library/signal.html
            # See Python/pythonrun.c.
            iolib.sigaction(SIGPIPE, SIG_DFL)

            # Respond to Ctrl-\ (core dump)
            iolib.sigaction(SIGQUIT, SIG_DFL)

            # Only standalone children should get Ctrl-Z.  Pipelines remain in the
            # foreground because suspending them is difficult with our 'lastpipe'
            # semantics.
            pid = posix.getpid()
            if posix.getpgid(0) == pid and self.parent_pipeline is None:
                iolib.sigaction(SIGTSTP, SIG_DFL)

            # More signals from
            # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html
            # (but not SIGCHLD)
            iolib.sigaction(SIGTTOU, SIG_DFL)
            iolib.sigaction(SIGTTIN, SIG_DFL)

            self.tracer.OnNewProcess(pid)
            # clear foreground pipeline for subshells
            self.thunk.Run()
            # Never returns

        #log('STARTED process %s, pid = %d', self, pid)
        self.tracer.OnProcessStart(pid, why)

        # Class invariant: after the process is started, it stores its PID.
        self.pid = pid

        # SetPgid needs to be applied from the child and the parent to avoid
        # racing in calls to tcsetpgrp() in the parent.  See APUE sec. 9.2.
        for st in self.state_changes:
            st.ApplyFromParent(self)

        # Program invariant: We keep track of every child process!
        # Waiter::WaitForOne() needs it to update state
        self.job_list.AddChildProcess(pid, self)

        return pid
1148
    def Wait(self, waiter):
        # type: (Waiter) -> int
        """Wait for this Process to finish.  Returns the exit status.

        waiter.WaitForOne() is what mutates self.state when OUR child is
        reaped, so we just loop until the state leaves Running.
        """
        # Keep waiting if waitpid() was interrupted with a signal (unlike the
        # 'wait' builtin)
        while self.state == job_state_e.Running:
            result, _ = waiter.WaitForOne()
            if result == W1_NO_CHILDREN:
                break  # nothing left to reap; avoid spinning forever

            # Linear search
            # if we get a W1_EXITED event, and the pid is OUR PID, then we can
            # return?
            # well we need the status too

        # Cleanup - for background jobs this happens in the 'wait' builtin,
        # e.g. after JobWait()
        if self.state == job_state_e.Exited:
            self.job_list.PopChildProcess(self.pid)

        assert self.status >= 0, self.status
        return self.status
1171
    def JobWait(self, waiter):
        # type: (Waiter) -> wait_status_t
        """Process::JobWait, called by wait builtin.

        Unlike Wait(), this can be cancelled by a signal (W1_CALL_INTR), in
        which case we return wait_status.Cancelled rather than looping.
        """
        # wait builtin can be interrupted
        while self.state == job_state_e.Running:
            result, w1_arg = waiter.WaitForOne()  # mutates self.state

            if result == W1_CALL_INTR:
                # w1_arg is the interrupting signal number
                return wait_status.Cancelled(w1_arg)

            if result == W1_NO_CHILDREN:
                break

            # Ignore W1_EXITED, W1_STOPPED - these are OTHER processes

        assert self.status >= 0, self.status
        return wait_status.Proc(self.state, self.status)
1189
    def WhenContinued(self):
        # type: () -> None
        """Called when this process is continued, e.g. by 'fg' after Ctrl-Z."""
        self.state = job_state_e.Running

        if self.parent_pipeline:
            # TODO: do we need anything here?
            pass

        # TODO: Should we remove it as a job?

        # Now job_id is set
        if self.exec_opts.interactive():
            #if 0:
            print_stderr('[%%%d] PID %d Continued' % (self.job_id, self.pid))

        #if self.in_background:
        if 1:  # unconditional for now; commented-out condition kept above
            self.job_control.MaybeTakeTerminal()
            self.SetForeground()
1209
    def WhenStopped(self, stop_sig):
        # type: (int) -> None
        """Called by the Waiter when this Process is stopped."""
        # 128 is a shell thing
        # https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html
        self.status = 128 + stop_sig
        self.state = job_state_e.Stopped

        if self.parent_pipeline:
            # TODO: do we need anything here?
            # We need AllStopped() just like AllExited()?

            #self.parent_pipeline.WhenPartIsStopped(pid, status)
            #return
            pass

        if self.job_id == -1:
            # This process was started in the foreground, not with &.  So it
            # was NOT a job, but after Ctrl-Z, it's a job.
            self.job_list.RegisterJob(self)

        # Now self.job_id is set
        if self.exec_opts.interactive():
            print_stderr('')  # newline after ^Z (TODO: consolidate with ^C)
            print_stderr('[%%%d] PID %d Stopped with signal %d' %
                         (self.job_id, self.pid, stop_sig))

        if not self.in_background:
            # e.g. sleep 5; then Ctrl-Z: foreground process becomes a
            # background job and the shell takes the terminal back
            self.job_control.MaybeTakeTerminal()
            self.SetBackground()
1241
    def WhenExited(self, pid, status):
        # type: (int, int) -> None
        """Called by the Waiter when this Process exits."""

        #log('Process WhenExited %d %d', pid, status)
        assert pid == self.pid, 'Expected %d, got %d' % (self.pid, pid)
        self.status = status
        self.state = job_state_e.Exited

        if self.parent_pipeline:
            # populate pipeline status array; update Pipeline state, etc.
            self.parent_pipeline.WhenPartExited(pid, status)
            return

        if self.job_id != -1 and self.in_background:
            # TODO: ONE condition should determine if this was a background
            # job, rather than a foreground process
            # "Job might have been brought to the foreground after being
            # assigned a job ID"
            if self.exec_opts.interactive():
                print_stderr('[%%%d] PID %d Done' % (self.job_id, self.pid))

        if not self.in_background:
            # Foreground child finished: shell takes the terminal back
            self.job_control.MaybeTakeTerminal()
1266
    def RunProcess(self, waiter, why):
        # type: (Waiter, trace_t) -> int
        """Run this process synchronously.  Returns the exit status."""
        self.StartProcess(why)
        # ShellExecutor might be calling this for the last part of a pipeline.
        if self.parent_pipeline is None:
            # QUESTION: Can the PGID of a single process just be the PID?  i.e.
            # avoid calling getpgid()?
            self.job_control.MaybeGiveTerminal(posix.getpgid(self.pid))
        return self.Wait(waiter)
1277
1278
class ctx_Pipe(object):
    """Context manager: redirect stdin to read from a pipe fd for the
    duration of a 'with' block, restoring it on exit.

    Errors from restoring are appended to err_out (presumably instead of
    being raised -- see FdState.Pop).
    """

    def __init__(self, fd_state, fd, err_out):
        # type: (FdState, int, List[error.IOError_OSError]) -> None
        # Note: the redirect is pushed HERE, not in __enter__
        fd_state.PushStdinFromPipe(fd)
        self.fd_state = fd_state
        self.err_out = err_out

    def __enter__(self):
        # type: () -> None
        pass

    def __exit__(self, type, value, traceback):
        # type: (Any, Any, Any) -> None
        self.fd_state.Pop(self.err_out)
1294
1295
class Pipeline(Job):
    """A pipeline of processes to run.

    Cases we handle:

    foo | bar
    $(foo | bar)
    foo | bar | read v
    """

    def __init__(self, sigpipe_status_ok, job_control, job_list, tracer):
        # type: (bool, JobControl, JobList, dev.Tracer) -> None
        Job.__init__(self)
        self.job_control = job_control
        self.job_list = job_list
        self.tracer = tracer
        self.exec_opts = tracer.exec_opts

        self.procs = []  # type: List[Process]
        self.pids = []  # type: List[int]  # pids in order
        self.pipe_status = []  # type: List[int]  # status in order
        self.status = -1  # for 'wait' jobs

        # INVALID_PGID until StartPipeline() decides whether this pipeline
        # leads its own process group
        self.pgid = INVALID_PGID

        # Optional for foreground
        self.last_thunk = None  # type: Tuple[CommandEvaluator, command_t]
        self.last_pipe = None  # type: Tuple[int, int]

        # When true, an exit status of 141 (SIGPIPE) is rewritten to 0 in
        # WhenPartExited()
        self.sigpipe_status_ok = sigpipe_status_ok

    def __repr__(self):
        # type: () -> str
        return '<Pipeline pgid=%d pids=%s state=%s procs=%s>' % (
            self.pgid, self.pids, _JobStateStr(self.state), self.procs)

    def ProcessGroupId(self):
        # type: () -> int
        """Returns the group ID of this pipeline.

        In an interactive shell, it's often the FIRST.
        """
        return self.pgid

    def PidForWait(self):
        # type: () -> int
        """Return the PID we can wait on.

        This is the same as the PID for $!

        Shell WART:
        The $! variable is the PID of the LAST pipeline part.
        But in an interactive shell, the PGID is the PID of the FIRST pipeline part.
        It would be nicer if these were consistent!
        """
        return self.pids[-1]

    def DisplayJob(self, job_id, f, style):
        # type: (int, mylib.Writer, int) -> None
        """Write output for the 'jobs' builtin: one line per pipeline part."""
        if style == STYLE_PID_ONLY:
            f.write('%d\n' % self.procs[0].pid)
        else:
            # Note: this is STYLE_LONG.
            for i, proc in enumerate(self.procs):
                if i == 0:  # show job ID for first element in pipeline
                    job_id_str = '%%%d' % job_id
                else:
                    job_id_str = '  '  # 2 spaces

                f.write('%s %d %7s ' %
                        (job_id_str, proc.pid, _JobStateStr(proc.state)))
                f.write(proc.thunk.UserString())
                f.write('\n')

    def DebugPrint(self):
        # type: () -> None
        print('Pipeline in state %s' % _JobStateStr(self.state))
        if mylib.PYTHON:  # %s for Process not allowed in C++
            for proc in self.procs:
                print('  proc %s' % proc)
            _, last_node = self.last_thunk
            print('  last %s' % last_node)
            print('  pipe_status %s' % self.pipe_status)

    def Add(self, p):
        # type: (Process) -> None
        """Append a process to the pipeline."""
        if len(self.procs) == 0:
            # First process: no pipe needed yet
            self.procs.append(p)
            return

        r, w = posix.pipe()
        #log('pipe for %s: %d %d', p, r, w)
        prev = self.procs[-1]

        prev.AddStateChange(StdoutToPipe(r, w))  # applied on StartPipeline()
        p.AddStateChange(StdinFromPipe(r, w))  # applied on StartPipeline()

        p.AddPipeToClose(r, w)  # MaybeClosePipe() on StartPipeline()

        self.procs.append(p)

    def AddLast(self, thunk):
        # type: (Tuple[CommandEvaluator, command_t]) -> None
        """Append the last node to the pipeline.

        This is run in the CURRENT process.  It is OPTIONAL, because
        pipelines in the background are run uniformly.
        """
        self.last_thunk = thunk

        assert len(self.procs) != 0

        r, w = posix.pipe()
        prev = self.procs[-1]
        prev.AddStateChange(StdoutToPipe(r, w))

        self.last_pipe = (r, w)  # So we can connect it to last_thunk

    def StartPipeline(self, waiter):
        # type: (Waiter) -> None
        """Fork all the pipeline parts (except last_thunk).

        Note: the waiter arg is currently unused here.
        """

        # If we are creating a pipeline in a subshell or we aren't running
        # with job control, our children should remain in our inherited
        # process group.  Otherwise the first process leads the pipeline's
        # own group.
        if self.job_control.Enabled():
            self.pgid = OWN_LEADER  # first process in pipeline is the leader

        for i, proc in enumerate(self.procs):
            if self.pgid != INVALID_PGID:
                proc.AddStateChange(SetPgid(self.pgid, self.tracer))

            # Figure out the pid
            pid = proc.StartProcess(trace.PipelinePart)
            if i == 0 and self.pgid != INVALID_PGID:
                # Mimic bash and use the PID of the FIRST process as the group
                # for the whole pipeline.
                self.pgid = pid

            self.pids.append(pid)
            self.pipe_status.append(-1)  # uninitialized

            # NOTE: This is done in the SHELL PROCESS after every fork() call.
            # It can't be done at the end; otherwise processes will have
            # descriptors from non-adjacent pipes.
            proc.MaybeClosePipe()

        if self.last_thunk:
            self.pipe_status.append(-1)  # for self.last_thunk

        #log('Started pipeline PIDS=%s, pgid=%d', self.pids, self.pgid)

    def Wait(self, waiter):
        # type: (Waiter) -> List[int]
        """Wait for this Pipeline to finish."""

        assert self.procs, "no procs for Wait()"
        # waitpid(-1) zero or more times
        while self.state == job_state_e.Running:
            # Keep waiting until there's nothing to wait for.
            result, _ = waiter.WaitForOne()
            if result == W1_NO_CHILDREN:
                break

        return self.pipe_status

    def JobWait(self, waiter):
        # type: (Waiter) -> wait_status_t
        """Pipeline::JobWait(), called by 'wait' builtin, e.g. 'wait %1'."""
        # wait builtin can be interrupted
        assert self.procs, "no procs for Wait()"
        while self.state == job_state_e.Running:
            result, w1_arg = waiter.WaitForOne()

            if result == W1_CALL_INTR:  # signal
                return wait_status.Cancelled(w1_arg)

            if result == W1_NO_CHILDREN:
                break

            # Ignore W1_EXITED, W1_STOPPED - these are OTHER processes

        assert all(st >= 0 for st in self.pipe_status), self.pipe_status
        return wait_status.Pipeline(self.state, self.pipe_status)

    def RunLastPart(self, waiter, fd_state):
        # type: (Waiter, FdState) -> List[int]
        """Run this pipeline synchronously (foreground pipeline).

        Returns:
          pipe_status (list of integers).
        """
        assert len(self.pids) == len(self.procs)

        # TODO: break circular dep. Bit flags could go in ASDL or headers.
        from osh import cmd_eval

        # This is tcsetpgrp()
        # TODO: fix race condition -- I believe the first process could have
        # stopped already, and thus getpgid() will fail
        self.job_control.MaybeGiveTerminal(self.pgid)

        # Run the last part of the pipeline IN PARALLEL with other processes.
        # It may or may not fork:
        #   echo foo | read line  # no fork, the builtin runs in THIS shell process
        #   ls | wc -l            # fork for 'wc'

        cmd_ev, last_node = self.last_thunk

        assert self.last_pipe is not None
        r, w = self.last_pipe  # set in AddLast()
        posix.close(w)  # we will not write here

        # Fix lastpipe / job control / DEBUG trap interaction
        cmd_flags = cmd_eval.NoDebugTrap if self.job_control.Enabled() else 0

        # The ERR trap only runs for the WHOLE pipeline, not the COMPONENTS in
        # a pipeline.
        cmd_flags |= cmd_eval.NoErrTrap

        io_errors = []  # type: List[error.IOError_OSError]
        with ctx_Pipe(fd_state, r, io_errors):
            cmd_ev.ExecuteAndCatch(last_node, cmd_flags)

        if len(io_errors):
            e_die('Error setting up last part of pipeline: %s' %
                  pyutil.strerror(io_errors[0]))

        # We won't read anymore.  If we don't do this, then 'cat' in 'cat
        # /dev/urandom | sleep 1' will never get SIGPIPE.
        posix.close(r)

        self.pipe_status[-1] = cmd_ev.LastStatus()
        if self.AllExited():
            self.state = job_state_e.Exited

        #log('pipestatus before all have finished = %s', self.pipe_status)
        return self.Wait(waiter)

    def AllExited(self):
        # type: () -> bool
        """True when every part has a real status (-1 means still running)."""

        # mycpp rewrite: all(status != -1 for status in self.pipe_status)
        for status in self.pipe_status:
            if status == -1:
                return False
        return True

    def WhenPartExited(self, pid, status):
        # type: (int, int) -> None
        """Called by Process::WhenExited()"""
        #log('Pipeline WhenExited %d %d', pid, status)
        i = self.pids.index(pid)
        # Note: list.index() raises ValueError on a missing PID, so this
        # assert can never actually fire
        assert i != -1, 'Unexpected PID %d' % pid

        # 141 == 128 + SIGPIPE; optionally treat it as success
        if status == 141 and self.sigpipe_status_ok:
            status = 0

        self.pipe_status[i] = status
        if not self.AllExited():
            return

        if self.job_id != -1 and self.in_background:
            # TODO: ONE condition
            # "Job might have been brought to the foreground after being
            # assigned a job ID"
            if self.exec_opts.interactive():
                print_stderr('[%%%d] PGID %d Done' %
                             (self.job_id, self.pids[0]))

        # Status of pipeline is status of last process
        self.status = self.pipe_status[-1]
        self.state = job_state_e.Exited

        if not self.in_background:
            self.job_control.MaybeTakeTerminal()
1572
1573
def _JobStateStr(i):
    # type: (job_state_t) -> str
    """Format a job_state_t for display, e.g. in 'jobs' output."""
    return job_state_str(i, dot=False)
1577
1578
def _GetTtyFd():
    # type: () -> int
    """Open /dev/tty and return its fd, or -1 if it can't be opened
    (i.e. stdio is not a TTY)."""
    try:
        fd = posix.open("/dev/tty", O_NONBLOCK | O_NOCTTY | O_RDWR, 0o666)
    except (IOError, OSError) as e:
        return -1
    return fd
1586
1587
class ctx_TerminalControl(object):
    """Enable job control for the duration of a 'with' block, and return
    the TTY to its original owner on exit."""

    def __init__(self, job_control, errfmt):
        # type: (JobControl, ui.ErrorFormatter) -> None
        # Side effect: takes the terminal / creates a process group
        job_control.InitJobControl()
        self.job_control = job_control
        self.errfmt = errfmt

    def __enter__(self):
        # type: () -> None
        pass

    def __exit__(self, type, value, traceback):
        # type: (Any, Any, Any) -> None

        # Return the TTY to the original owner before exiting.
        try:
            self.job_control.MaybeReturnTerminal()
        except error.FatalRuntime as e:
            # Don't abort the shell on error, just print a message.
            self.errfmt.PrettyPrintError(e)
1609
1610
class JobControl(object):
    """Interface to setpgid(), tcsetpgrp(), etc."""

    def __init__(self):
        # type: () -> None

        # The main shell's PID and group ID.
        self.shell_pid = -1
        self.shell_pgid = -1

        # The fd of the controlling tty.  Set to -1 when job control is disabled.
        self.shell_tty_fd = -1

        # For giving the terminal back to our parent before exiting (if not a
        # login shell).
        self.original_tty_pgid = -1

    def InitJobControl(self):
        # type: () -> None
        """Become a process group leader and take the terminal, if possible.

        On any failure, job control is disabled by setting shell_tty_fd = -1.
        """
        self.shell_pid = posix.getpid()
        orig_shell_pgid = posix.getpgid(0)
        self.shell_pgid = orig_shell_pgid
        self.shell_tty_fd = _GetTtyFd()

        # If we aren't the leader of our process group, create a group and mark
        # ourselves as the leader.
        if self.shell_pgid != self.shell_pid:
            try:
                posix.setpgid(self.shell_pid, self.shell_pid)
                self.shell_pgid = self.shell_pid
            except (IOError, OSError) as e:
                self.shell_tty_fd = -1

        if self.shell_tty_fd != -1:
            # Remember the current foreground pgid so we can restore it in
            # MaybeReturnTerminal()
            self.original_tty_pgid = posix.tcgetpgrp(self.shell_tty_fd)

            # If stdio is a TTY, put the shell's process group in the foreground.
            try:
                posix.tcsetpgrp(self.shell_tty_fd, self.shell_pgid)
            except (IOError, OSError) as e:
                # We probably aren't in the session leader's process group.
                # Disable job control.
                self.shell_tty_fd = -1
                self.shell_pgid = orig_shell_pgid
                posix.setpgid(self.shell_pid, self.shell_pgid)

    def Enabled(self):
        # type: () -> bool
        """
        Only the main shell process should bother with job control functions.
        """
        #log('ENABLED? %d', self.shell_tty_fd)

        # TODO: get rid of getpid()?  I think SubProgramThunk should set a
        # flag.
        return self.shell_tty_fd != -1 and posix.getpid() == self.shell_pid

    # TODO: This isn't a PID.  This is a process group ID?
    #
    # What should the table look like?
    #
    # Do we need the last PID?  I don't know why bash prints that.  Probably so
    # you can do wait $!
    # wait -n waits for any node to go from job_state_e.Running to job_state_e.Done?
    #
    # And it needs a flag for CURRENT, for the implicit arg to 'fg'.
    # job_id is just an integer.  This is sort of lame.
    #
    # [job_id, flag, pgid, job_state, node]

    def MaybeGiveTerminal(self, pgid):
        # type: (int) -> None
        """If stdio is a TTY, move the given process group to the
        foreground."""
        if not self.Enabled():
            # Only call tcsetpgrp when job control is enabled.
            return

        try:
            posix.tcsetpgrp(self.shell_tty_fd, pgid)
        except (IOError, OSError) as e:
            e_die('osh: Failed to move process group %d to foreground: %s' %
                  (pgid, pyutil.strerror(e)))

    def MaybeTakeTerminal(self):
        # type: () -> None
        """If stdio is a TTY, return the main shell's process group to the
        foreground."""
        self.MaybeGiveTerminal(self.shell_pgid)

    def MaybeReturnTerminal(self):
        # type: () -> None
        """Called before the shell exits."""
        # Restore whoever owned the terminal before InitJobControl()
        self.MaybeGiveTerminal(self.original_tty_pgid)
1705
1706
class JobList(object):
    """Global list of jobs, used by a few builtins."""

    def __init__(self):
        # type: () -> None

        # self.child_procs is used by WaitForOne() to call proc.WhenExited()
        # and proc.WhenStopped().
        self.child_procs = {}  # type: Dict[int, Process]

        # self.jobs is used by 'wait %1' and 'fg %2'
        # job_id -> Job
        self.jobs = {}  # type: Dict[int, Job]

        # self.pid_to_job is used by 'wait -n' and 'wait' - to call
        # CleanupWhenProcessExits().  The dict key is job.PidForWait()
        self.pid_to_job = {}  # type: Dict[int, Job]

        # TODO: consider linear search through JobList
        # - by job ID
        # - by PID
        # - then you don't have to bother as much with the dicts
        #   - you still need the child process dict to set the status and
        #     state?

        self.debug_pipelines = []  # type: List[Pipeline]

        # Counter used to assign IDs to jobs.  It is incremented every time a
        # job is created.  Once all active jobs are done it is reset to 1.  I'm
        # not sure if this reset behavior is mandated by POSIX, but other
        # shells do it, so we mimic for the sake of compatibility.
        self.next_job_id = 1

    def RegisterJob(self, job):
        # type: (Job) -> int
        """Create a background job, which you can wait %2, fg %2, kill %2, etc.

        - A job is either a Process or Pipeline.
        - A job is registered in these 2 situations:
          1. async: sleep 5 &
          2. stopped: sleep 5; then Ctrl-Z
        That is, in the interactive shell, the foreground process can
        receive signals, and can be stopped.
        """
        job_id = self.next_job_id
        self.next_job_id += 1

        # Look up the job by job ID, for wait %1, kill %1, etc.
        self.jobs[job_id] = job

        # Pipelines
        # TODO: register all PIDs?  And conversely, remove all PIDs
        # what do other shells do?
        self.pid_to_job[job.PidForWait()] = job

        # Mutate the job itself
        job.job_id = job_id

        return job_id

    def JobFromPid(self, pid):
        # type: (int) -> Optional[Job]
        """Look up a job by the PID registered in RegisterJob(), or None."""
        return self.pid_to_job.get(pid)

    def _MaybeResetCounter(self):
        # type: () -> None
        # Reset the job ID counter when no jobs remain, mimicking other shells
        if len(self.jobs) == 0:
            self.next_job_id = 1

    def CleanupWhenJobExits(self, job):
        # type: (Job) -> None
        """Called when say 'fg %2' exits, and when 'wait %2' exits"""
        mylib.dict_erase(self.jobs, job.job_id)

        mylib.dict_erase(self.pid_to_job, job.PidForWait())

        self._MaybeResetCounter()

    def CleanupWhenProcessExits(self, pid):
        # type: (int) -> None
        """Given a PID, remove the job if it has Exited."""

        job = self.pid_to_job.get(pid)
        if 0:  # disabled debugging aid
            # TODO: background pipelines don't clean up properly, because only
            # the last PID is registered in job_list.pid_to_job

            # Should we switch to a linear search of a background job array?
            # Foreground jobs are stored in self.child_procs, and we migrate
            # between them?

            log('*** CleanupWhenProcessExits %d', pid)
            log('job %s', job)
            #log('STATE %s', _JobStateStr(job.state))

        if job and job.state == job_state_e.Exited:
            # Note: only the LAST PID in a pipeline will ever be here, but it's
            # OK to try to delete it.
            mylib.dict_erase(self.pid_to_job, pid)

            mylib.dict_erase(self.jobs, job.job_id)

            self._MaybeResetCounter()

    def AddChildProcess(self, pid, proc):
        # type: (int, Process) -> None
        """Every child process should be added here as soon as we know its PID.

        When the Waiter gets an EXITED or STOPPED notification, we need
        to know about it so 'jobs' can work.

        Note: this contains Process objects that are part of a Pipeline object.
        Does it need to?
        """
        self.child_procs[pid] = proc

    def PopChildProcess(self, pid):
        # type: (int) -> Optional[Process]
        """Remove the child process with the given PID."""
        # Returns None if the PID was not registered
        pr = self.child_procs.get(pid)
        if pr is not None:
            mylib.dict_erase(self.child_procs, pid)
        return pr

    if mylib.PYTHON:

        def AddPipeline(self, pi):
            # type: (Pipeline) -> None
            """For debugging only."""
            self.debug_pipelines.append(pi)

    def GetCurrentAndPreviousJobs(self):
        # type: () -> Tuple[Optional[Job], Optional[Job]]
        """Return the "current" and "previous" jobs (AKA `%+` and `%-`).

        See the POSIX specification for the `jobs` builtin for details:
        https://pubs.opengroup.org/onlinepubs/007904875/utilities/jobs.html

        IMPORTANT NOTE: This method assumes that the jobs list will not change
        during its execution! This assumption holds for now because we only ever
        update the jobs list from the main loop after WaitPid() informs us of a
        change. If we implement `set -b` and install a signal handler for
        SIGCHLD we should be careful to synchronize it with this function. The
        unsafety of mutating GC data structures from a signal handler should
        make this a non-issue, but if bugs related to this appear this note may
        be helpful...
        """
        # Split all active jobs by state and sort each group by decreasing job
        # ID to approximate newness.
        stopped_jobs = []  # type: List[Job]
        running_jobs = []  # type: List[Job]
        for i in xrange(0, self.next_job_id):
            job = self.jobs.get(i, None)
            if not job:
                continue

            if job.state == job_state_e.Stopped:
                stopped_jobs.append(job)

            elif job.state == job_state_e.Running:
                running_jobs.append(job)

        current = None  # type: Optional[Job]
        previous = None  # type: Optional[Job]
        # POSIX says: If there is any suspended job, then the current job shall
        # be a suspended job. If there are at least two suspended jobs, then the
        # previous job also shall be a suspended job.
        #
        # So, we will only return running jobs from here if there are no recent
        # stopped jobs.
        if len(stopped_jobs) > 0:
            current = stopped_jobs.pop()

        if len(stopped_jobs) > 0:
            previous = stopped_jobs.pop()

        if len(running_jobs) > 0 and not current:
            current = running_jobs.pop()

        if len(running_jobs) > 0 and not previous:
            previous = running_jobs.pop()

        if not previous:
            previous = current

        return current, previous

    def JobFromSpec(self, job_spec):
        # type: (str) -> Optional[Job]
        """Parse the given job spec and return the matching job. If there is no
        matching job, this function returns None.

        See the POSIX spec for the `jobs` builtin for details about job specs:
        https://pubs.opengroup.org/onlinepubs/007904875/utilities/jobs.html
        """
        if job_spec in CURRENT_JOB_SPECS:
            current, _ = self.GetCurrentAndPreviousJobs()
            return current

        if job_spec == '%-':
            _, previous = self.GetCurrentAndPreviousJobs()
            return previous

        #log('** SEARCHING %s', self.jobs)
        # TODO: Add support for job specs based on prefixes of process argv.
        m = util.RegexSearch(r'^%([0-9]+)$', job_spec)
        if m is not None:
            # m[0] is the whole match, m[1] the digit group
            assert len(m) == 2
            job_id = int(m[1])
            if job_id in self.jobs:
                return self.jobs[job_id]

        return None

    def DisplayJobs(self, style):
        # type: (int) -> None
        """Used by the 'jobs' builtin.

        https://pubs.opengroup.org/onlinepubs/9699919799/utilities/jobs.html

        "By default, the jobs utility shall display the status of all stopped jobs,
        running background jobs and all jobs whose status has changed and have not
        been reported by the shell."
        """
        # NOTE: A job is a background process or pipeline.
        #
        # echo hi | wc -l    -- this starts two processes.  Wait for TWO
        # echo hi | wc -l &  -- this starts a process which starts two processes
        #                       Wait for ONE.
        #
        # 'jobs -l' GROUPS the PIDs by job.  It has the job number, + - indicators
        # for %% and %-, PID, status, and "command".
        #
        # Every component of a pipeline is on the same line with 'jobs', but
        # they're separated into different lines with 'jobs -l'.
        #
        # See demo/jobs-builtin.sh

        # $ jobs -l
        # [1]+ 24414 Stopped                 sleep 5
        #      24415                       | sleep 5
        # [2]  24502 Running                 sleep 6
        #      24503                       | sleep 6
        #      24504                       | sleep 5 &
        # [3]- 24508 Running                 sleep 6
        #      24509                       | sleep 6
        #      24510                       | sleep 5 &

        f = mylib.Stdout()
        for job_id, job in iteritems(self.jobs):
            # Use the %1 syntax
            job.DisplayJob(job_id, f, style)

    def DebugPrint(self):
        # type: () -> None
        """Dump child processes and pipelines, for debugging."""

        f = mylib.Stdout()
        f.write('\n')
        f.write('[process debug info]\n')

        for pid, proc in iteritems(self.child_procs):
            proc.DisplayJob(-1, f, STYLE_DEFAULT)
            #p = ' |' if proc.parent_pipeline else ''
            #print('%d %7s %s%s' % (pid, _JobStateStr(proc.state), proc.thunk.UserString(), p))

        if len(self.debug_pipelines):
            f.write('\n')
            f.write('[pipeline debug info]\n')
            for pi in self.debug_pipelines:
                pi.DebugPrint()

    def ListRecent(self):
        # type: () -> None
        """For jobs -n, which I think is also used in the interactive
        prompt."""
        pass

    def NumRunning(self):
        # type: () -> int
        """Return the number of running jobs.

        Used by 'wait' and 'wait -n'.
        """
        count = 0
        for _, job in iteritems(self.jobs):  # mycpp rewrite: from itervalues()
            if job.State() == job_state_e.Running:
                count += 1
        return count
1995
1996
# Some WaitForOne() return values, which are negative.  The numbers are
# arbitrary negative numbers.
#
# They don't overlap with iolib.UNTRAPPED_SIGWINCH == -10
# which LastSignal() can return

W1_EXITED = -11  # process exited
W1_STOPPED = -12  # process was stopped
W1_CALL_INTR = -15  # the waitpid(-1) call was interrupted

W1_NO_CHILDREN = -13  # no child processes to wait for
W1_NO_CHANGE = -14  # WNOHANG was passed and there were no state changes

# Placeholder for the second element of WaitForOne()'s (result, arg) tuple
# when there is no signal number to report
NO_ARG = -20
2011
2012
2013class Waiter(object):
2014 """A capability to wait for processes.
2015
2016 This must be a singleton (and is because CommandEvaluator is a singleton).
2017
2018 Invariants:
2019 - Every child process is registered once
2020 - Every child process is waited for
2021
2022 Canonical example of why we need a GLOBAL waiter:
2023
2024 { sleep 3; echo 'done 3'; } &
2025 { sleep 4; echo 'done 4'; } &
2026
2027 # ... do arbitrary stuff ...
2028
2029 { sleep 1; exit 1; } | { sleep 2; exit 2; }
2030
2031 Now when you do wait() after starting the pipeline, you might get a pipeline
2032 process OR a background process! So you have to distinguish between them.
2033 """
2034
    def __init__(self, job_list, exec_opts, signal_safe, tracer):
        # type: (JobList, optview.Exec, iolib.SignalSafe, dev.Tracer) -> None
        self.job_list = job_list
        self.exec_opts = exec_opts
        self.signal_safe = signal_safe
        self.tracer = tracer
        # Status reported by LastStatusCode(); 127 is the 'wait -n' error code
        self.last_status = 127  # wait -n error code
2042
    def LastStatusCode(self):
        # type: () -> int
        """Returns exit code for wait -n"""
        return self.last_status
2047
    def WaitForOne(self, waitpid_options=0):
        # type: (int) -> Tuple[int, int]
        """Wait until the next process returns (or maybe Ctrl-C).

        Returns:
          One of these negative numbers:
            W1_NO_CHILDREN         Nothing to wait for
            W1_NO_CHANGE           no state changes when WNOHANG passed - used by
                                   main loop
            W1_EXITED              Process exited (with or without signal)
            W1_STOPPED             Process stopped
            W1_CALL_INTR
            UNTRAPPED_SIGWINCH
          Or
            result > 0             Signal that waitpid() was interrupted with

          In the interactive shell, we return 0 if we get a Ctrl-C, so the caller
          will try again.

        Callers:
          wait -n          -- loop until there is one fewer process (TODO)
          wait             -- loop until there are no processes
          wait $!          -- loop until job state is Done (process or pipeline)
          Process::Wait()  -- loop until Process state is done
          Pipeline::Wait() -- loop until Pipeline state is done

        Comparisons:
          bash: jobs.c waitchld() Has a special case macro(!) CHECK_WAIT_INTR for
          the wait builtin

          dash: jobs.c waitproc() uses sigfillset(), sigprocmask(), etc.  Runs in a
          loop while (gotsigchld), but that might be a hack for System V!

        Should we have a cleaner API like posix::wait_for_one() ?

        wait_result =
          NoChildren -- ECHILD - no more
        | Exited(int pid) -- process done - call job_list.PopStatus() for status
          # do we also we want ExitedWithSignal() ?
        | Stopped(int pid)
        | Interrupted(int sig_num) -- may or may not retry
        | UntrappedSigwinch -- ignored

        | NoChange -- for WNOHANG - is this a different API?
        """
        #waitpid_options |= WCONTINUED
        pid, status = pyos.WaitPid(waitpid_options)
        if pid == 0:
            return W1_NO_CHANGE, NO_ARG # WNOHANG passed, and no state changes

        if pid < 0:  # error case
            # On error, pyos.WaitPid() returns the errno in the second slot.
            err_num = status
            #log('waitpid() error => %d %s', e.errno, pyutil.strerror(e))
            if err_num == ECHILD:
                return W1_NO_CHILDREN, NO_ARG

            if err_num == EINTR:  # Bug #858 fix
                # e.g. 1 for SIGHUP, or also be UNTRAPPED_SIGWINCH == -1
                last_sig = self.signal_safe.LastSignal()
                if last_sig == iolib.UNTRAPPED_SIGWINCH:
                    # SIGWINCH with no trap set is ignored by callers.
                    return iolib.UNTRAPPED_SIGWINCH, NO_ARG
                else:
                    return W1_CALL_INTR, last_sig

            # No other errors?  Man page says waitpid(INT_MIN) == ESRCH, "no
            # such process", an invalid PID
            raise AssertionError()

        # All child processes are supposed to be in this dict.  Even if a
        # grandchild outlives the child (its parent), the shell does NOT become
        # the parent.  The init process does.
        proc = self.job_list.child_procs.get(pid)

        # proc can legitimately be None (see warning below), so every use
        # below is guarded with 'if proc'.
        if proc is None and self.exec_opts.verbose_warn():
            print_stderr("oils: PID %d exited, but oils didn't start it" % pid)

        # Debugging aid: flip to 1 to dump the job list on every wait.
        if 0:
            self.job_list.DebugPrint()

        was_stopped = False
        if WIFSIGNALED(status):
            term_sig = WTERMSIG(status)
            # Shell convention: a signal death is reported as 128 + signal.
            status = 128 + term_sig

            # Print newline after Ctrl-C.
            if term_sig == SIGINT:
                print('')

            if proc:
                proc.WhenExited(pid, status)

        elif WIFEXITED(status):
            # Normal exit: extract the child's exit code.
            status = WEXITSTATUS(status)
            if proc:
                proc.WhenExited(pid, status)

        elif WIFSTOPPED(status):
            # Child was stopped by a signal (e.g. SIGTSTP from Ctrl-Z); it is
            # still alive, so don't call WhenExited().
            was_stopped = True

            stop_sig = WSTOPSIG(status)

            if proc:
                proc.WhenStopped(stop_sig)

        # This would be more consistent, but it's an extension to POSIX
        #elif WIFCONTINUED(status):
        #  if proc:
        #    proc.WhenContinued()

        else:
            # waitpid() should only report exited/signaled/stopped states.
            raise AssertionError(status)

        # NOTE: in the stopped branch, 'status' is still the raw waitpid()
        # status word, not a decoded exit code.
        self.last_status = status  # for wait -n
        self.tracer.OnProcessEnd(pid, status)

        if was_stopped:
            return W1_STOPPED, pid
        else:
            return W1_EXITED, pid
2167
2168 def PollForEvents(self):
2169 # type: () -> None
2170 """For the interactive shell to print when processes have exited."""
2171 while True:
2172 result, _ = self.WaitForOne(waitpid_options=WNOHANG)
2173
2174 if result == W1_NO_CHANGE:
2175 break
2176 if result == W1_NO_CHILDREN:
2177 break
2178
2179 # Keep polling here
2180 assert result in (W1_EXITED, W1_STOPPED), result
2181 # W1_CALL_INTR and iolib.UNTRAPPED_SIGWINCH should not happen,
2182 # because WNOHANG is a non-blocking call