#!/usr/bin/env python2
"""
process_osh.py - Builtins that deal with processes or modify process state.

This is sort of the opposite of builtin_pure.py.
"""
from __future__ import print_function

import resource
from resource import (RLIM_INFINITY, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA,
                      RLIMIT_FSIZE, RLIMIT_NOFILE, RLIMIT_STACK, RLIMIT_AS)
from signal import SIGCONT

from _devbuild.gen import arg_types
from _devbuild.gen.syntax_asdl import loc, CompoundWord
from _devbuild.gen.runtime_asdl import (cmd_value, job_state_e, wait_status,
                                        wait_status_e)
from core import dev
from core import error
from core.error import e_usage, e_die_status
from core import process  # W1_EXITED, etc.
from core import pyos
from core import pyutil
from core import vm
from frontend import flag_util
from frontend import match
from frontend import typed_args
from mycpp import mops
from mycpp import mylib
from mycpp.mylib import log, tagswitch, print_stderr

import posix_ as posix

from typing import TYPE_CHECKING, List, Tuple, Optional, cast
if TYPE_CHECKING:
    from core.process import Waiter, ExternalProgram, FdState
    from core import executor
    from core import state
    from display import ui

_ = log


class Jobs(vm._Builtin):
    """List jobs."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('jobs', cmd_val)
        arg = arg_types.jobs(attrs.attrs)

        if arg.l:
            style = process.STYLE_LONG
        elif arg.p:
            style = process.STYLE_PID_ONLY
        else:
            style = process.STYLE_DEFAULT

        self.job_list.DisplayJobs(style)

        if arg.debug:
            self.job_list.DebugPrint()

        return 0
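    # Illustrative usage (a sketch, based on the flags handled above; the
    # --debug spelling is assumed from arg.debug):
    #
    #   jobs            # list jobs in the default style
    #   jobs -l         # long style
    #   jobs -p         # PIDs only
    #   jobs --debug    # also dump the job list's internal state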


class Fg(vm._Builtin):
    """Put a job in the foreground."""

    def __init__(self, job_control, job_list, waiter):
        # type: (process.JobControl, process.JobList, Waiter) -> None
        self.job_control = job_control
        self.job_list = job_list
        self.waiter = waiter
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        job_spec = ''  # Job spec for current job is the default
        if len(cmd_val.argv) > 1:
            job_spec = cmd_val.argv[1]

        job = self.job_list.JobFromSpec(job_spec)
        # note: the 'wait' builtin falls back to JobFromPid()
        if job is None:
            print_stderr('fg: No job to put in the foreground')
            return 1

        pgid = job.ProcessGroupId()
        assert pgid != process.INVALID_PGID, \
            'Processes put in the background should have a PGID'

        # Put the job's process group back into the foreground. GiveTerminal()
        # must be called before sending SIGCONT or else the process might
        # immediately get suspended again if it tries to read/write on the
        # terminal.
        self.job_control.MaybeGiveTerminal(pgid)
        posix.killpg(pgid, SIGCONT)  # Send signal

        if self.exec_opts.interactive():
            print_stderr('[%%%d] PID %d Continued' % (job.job_id, pgid))

        # We are not using waitpid(WCONTINUE) and WIFCONTINUED() in
        # WaitForOne() -- it's an extension to POSIX that isn't necessary for 'fg'
        job.SetForeground()
        job.state = job_state_e.Running

        status = -1

        wait_st = job.JobWait(self.waiter)
        UP_wait_st = wait_st
        with tagswitch(wait_st) as case:
            if case(wait_status_e.Proc):
                wait_st = cast(wait_status.Proc, UP_wait_st)
                if wait_st.state == job_state_e.Exited:
                    self.job_list.PopChildProcess(job.PidForWait())
                    self.job_list.CleanupWhenJobExits(job)
                status = wait_st.code

            elif case(wait_status_e.Pipeline):
                wait_st = cast(wait_status.Pipeline, UP_wait_st)
                # TODO: handle PIPESTATUS? Is this right?
                status = wait_st.codes[-1]

            elif case(wait_status_e.Cancelled):
                wait_st = cast(wait_status.Cancelled, UP_wait_st)
                status = 128 + wait_st.sig_num

            else:
                raise AssertionError()

        return status


class Bg(vm._Builtin):
    """Put a job in the background."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        # How does this differ from 'fg'? It doesn't wait and it sets controlling
        # terminal?

        raise error.Usage("isn't implemented", loc.Missing)


class Fork(vm._Builtin):

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('fork',
                                         cmd_val,
                                         accept_typed_args=True)

        arg, location = arg_r.Peek2()
        if arg is not None:
            e_usage('got unexpected argument %r' % arg, location)

        cmd_frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunBackgroundJob(cmd_frag)
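    # Illustrative YSH usage (a sketch): 'fork' takes a typed block and runs
    # it as a background job, e.g.
    #
    #   fork { sleep 1 }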


class ForkWait(vm._Builtin):

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('forkwait',
                                         cmd_val,
                                         accept_typed_args=True)
        arg, location = arg_r.Peek2()
        if arg is not None:
            e_usage('got unexpected argument %r' % arg, location)

        cmd_frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunSubshell(cmd_frag)
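    # Illustrative YSH usage (a sketch): 'forkwait' runs its block in a
    # subshell and waits for it, e.g.
    #
    #   forkwait { cd /tmp }   # doesn't change the parent shell's directory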


class Exec(vm._Builtin):

    def __init__(
            self,
            mem,  # type: state.Mem
            ext_prog,  # type: ExternalProgram
            fd_state,  # type: FdState
            search_path,  # type: executor.SearchPath
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.mem = mem
        self.ext_prog = ext_prog
        self.fd_state = fd_state
        self.search_path = search_path
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('exec', cmd_val)

        # Apply redirects in this shell.  NOTE: Redirects were processed earlier.
        if arg_r.AtEnd():
            self.fd_state.MakePermanent()
            return 0

        environ = self.mem.GetEnv()
        if 0:
            log('E %r', environ)
            log('E %r', environ)
            log('ZZ %r', environ.get('ZZ'))
        i = arg_r.i
        cmd = cmd_val.argv[i]
        argv0_path = self.search_path.CachedLookup(cmd)
        if argv0_path is None:
            e_die_status(127, 'exec: %r not found' % cmd, cmd_val.arg_locs[1])

        # shift off 'exec', and remove typed args because they don't apply
        c2 = cmd_value.Argv(cmd_val.argv[i:], cmd_val.arg_locs[i:],
                            cmd_val.is_last_cmd, cmd_val.self_obj, None)

        self.ext_prog.Exec(argv0_path, c2, environ)  # NEVER RETURNS
        # makes mypy and C++ compiler happy
        raise AssertionError('unreachable')
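    # Illustrative usage (a sketch), matching the two branches above:
    #
    #   exec 3< infile.txt   # no argv: the redirect becomes permanent in this shell
    #   exec sleep 1         # argv given: replace the shell process entirely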


class Wait(vm._Builtin):
    """
    wait: wait [-n] [id ...]
      Wait for job completion and return exit status.

      Waits for each process identified by an ID, which may be a process ID or
      a job specification, and reports its termination status.  If ID is not
      given, waits for all currently active child processes, and the return
      status is zero.  If ID is a job specification, waits for all processes
      in that job's pipeline.

      If the -n option is supplied, waits for the next job to terminate and
      returns its exit status.

      Exit Status:
      Returns the status of the last ID; fails if ID is invalid or an invalid
      option is given.
    """
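    # Illustrative usage (a sketch):
    #
    #   sleep 1 & sleep 2 &
    #   wait          # wait for all children; returns 0
    #   wait $!       # wait for the last background PID
    #   wait %1       # wait for job 1 (a job spec)
    #   wait -n       # wait for the next job to finish; return its status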

    def __init__(
            self,
            waiter,  # type: Waiter
            job_list,  # type: process.JobList
            mem,  # type: state.Mem
            tracer,  # type: dev.Tracer
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.waiter = waiter
        self.job_list = job_list
        self.mem = mem
        self.tracer = tracer
        self.errfmt = errfmt
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        with dev.ctx_Tracer(self.tracer, 'wait', cmd_val.argv):
            return self._Run(cmd_val)

    def _WaitForJobs(self, job_ids, arg_locs):
        # type: (List[str], List[CompoundWord]) -> int

        # Get list of jobs.  Then we need to check if they are ALL stopped.
        # Returns the exit code of the last one on the COMMAND LINE, not the
        # exit code of the last one to FINISH.

        jobs = []  # type: List[process.Job]
        for i, job_id in enumerate(job_ids):
            location = arg_locs[i]

            job = None  # type: Optional[process.Job]
            if job_id == '' or job_id.startswith('%'):
                job = self.job_list.JobFromSpec(job_id)

            if job is None:
                #log('JOB %s', job_id)
                # Does it look like a PID?
                try:
                    pid = int(job_id)
                except ValueError:
                    raise error.Usage(
                        'expected PID or jobspec, got %r' % job_id, location)

                job = self.job_list.JobFromPid(pid)
                #log('WAIT JOB %r', job)

            if job is None:
                self.errfmt.Print_("Job %s wasn't found" % job_id,
                                   blame_loc=location)
                return 127

            jobs.append(job)

        status = 1  # error
        for job in jobs:
            # polymorphic call: Process, Pipeline
            wait_st = job.JobWait(self.waiter)

            UP_wait_st = wait_st
            with tagswitch(wait_st) as case:
                if case(wait_status_e.Proc):
                    wait_st = cast(wait_status.Proc, UP_wait_st)
                    if wait_st.state == job_state_e.Exited:
                        self.job_list.PopChildProcess(job.PidForWait())
                        self.job_list.CleanupWhenJobExits(job)
                    status = wait_st.code

                elif case(wait_status_e.Pipeline):
                    wait_st = cast(wait_status.Pipeline, UP_wait_st)
                    # TODO: handle PIPESTATUS? Is this right?
                    status = wait_st.codes[-1]

                    # It would be logical to set PIPESTATUS here, but it's NOT
                    # what other shells do
                    #
                    # I think PIPESTATUS is legacy, and we can design better
                    # YSH semantics
                    #self.mem.SetPipeStatus(wait_st.codes)

                elif case(wait_status_e.Cancelled):
                    wait_st = cast(wait_status.Cancelled, UP_wait_st)
                    status = 128 + wait_st.sig_num

                else:
                    raise AssertionError()

        # Return the last status
        return status
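    # e.g. (a sketch): 'wait %1 %2' returns the status of %2, the last ID on
    # the command line, even if job %1 is the last one to finish.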

    def _WaitNext(self):
        # type: () -> int

        # Loop until there is one fewer process running, there's nothing to
        # wait for, or there's a signal
        n = self.job_list.NumRunning()
        if n == 0:
            status = 127
        else:
            target = n - 1
            status = 0
            while self.job_list.NumRunning() > target:
                result, w1_arg = self.waiter.WaitForOne()
                if result == process.W1_EXITED:
                    pid = w1_arg
                    pr = self.job_list.PopChildProcess(pid)
                    # TODO: background pipelines don't clean up properly,
                    # because only the last PID is registered in
                    # job_list.pid_to_job
                    self.job_list.CleanupWhenProcessExits(pid)

                    if pr is None:
                        if self.exec_opts.verbose_warn():
                            print_stderr(
                                "oils wait: PID %d exited, but oils didn't start it"
                                % pid)
                    else:
                        status = pr.status

                elif result == process.W1_NO_CHILDREN:
                    status = 127
                    break

                elif result == process.W1_CALL_INTR:  # signal
                    status = 128 + w1_arg
                    break

        return status

    def _Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('wait', cmd_val)
        arg = arg_types.wait(attrs.attrs)

        job_ids, arg_locs = arg_r.Rest2()

        if len(job_ids):
            # Note: -n and --all ignored in this case, like bash
            return self._WaitForJobs(job_ids, arg_locs)

        if arg.n:
            return self._WaitNext()

        # 'wait' or 'wait --all'

        status = 0

        # Note: NumRunning() makes sure we ignore stopped processes, which
        # cause WaitForOne() to return
        while self.job_list.NumRunning() != 0:
            result, w1_arg = self.waiter.WaitForOne()
            if result == process.W1_EXITED:
                pid = w1_arg
                pr = self.job_list.PopChildProcess(pid)
                # TODO: background pipelines don't clean up properly, because
                # only the last PID is registered in job_list.pid_to_job
                self.job_list.CleanupWhenProcessExits(pid)

                if arg.verbose:
                    self.errfmt.PrintMessage(
                        '(wait) PID %d exited with status %d' %
                        (pid, pr.status), cmd_val.arg_locs[0])

                if pr.status != 0 and arg.all:  # YSH extension: respect failure
                    if arg.verbose:
                        self.errfmt.PrintMessage(
                            'wait --all: will fail with status 1')
                    status = 1  # set status, but keep waiting

            if result == process.W1_NO_CHILDREN:
                break  # status is 0

            if result == process.W1_CALL_INTR:
                status = 128 + w1_arg
                break

        return status
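    # Illustrative usage of the YSH extension above (a sketch; the --all and
    # --verbose spellings are assumed from arg.all / arg.verbose):
    #
    #   false & true &
    #   wait --all            # returns 1 because one child failed
    #   wait --all --verbose  # also reports each PID as it exits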


class Umask(vm._Builtin):

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""
        pass

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        argv = cmd_val.argv[1:]
        if len(argv) == 0:
            # umask() has a dumb API: you can't get it without modifying it first!
            # NOTE: dash disables interrupts around the two umask() calls, but that
            # shouldn't be a concern for us.  Signal handlers won't call umask().
            mask = posix.umask(0)
            posix.umask(mask)  # restore the original mask
            print('0%03o' % mask)  # octal format
            return 0

        if len(argv) == 1:
            a = argv[0]
            try:
                new_mask = int(a, 8)
            except ValueError:
                # NOTE: This also happens when we have '8' or '9' in the input.
                print_stderr(
                    "oils warning: umask with symbolic input isn't implemented"
                )
                return 1

            posix.umask(new_mask)
            return 0

        e_usage('umask: unexpected arguments', loc.Missing)
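    # Worked example (a sketch): with a mask of 0o022, 'umask' prints '0022',
    # and files created with mode 0o666 end up as 0o644 (0o666 & ~0o022).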


def _LimitString(lim, factor):
    # type: (mops.BigInt, int) -> str
    if mops.Equal(lim, mops.FromC(RLIM_INFINITY)):
        return 'unlimited'
    else:
        i = mops.Div(lim, mops.IntWiden(factor))
        return mops.ToStr(i)
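# Example (a sketch): a limit of 8 * 1024 * 1024 bytes shown with factor 1024
# prints as '8192'; RLIM_INFINITY prints as 'unlimited'.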


class Ulimit(vm._Builtin):

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""

        self._table = None  # type: List[Tuple[str, int, int, str]]

    def _Table(self):
        # type: () -> List[Tuple[str, int, int, str]]

        # POSIX 2018
        #
        # https://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
        if self._table is None:
            # This table matches _ULIMIT_RESOURCES in frontend/flag_def.py

            # flag, RLIMIT_X, factor, description
            self._table = [
                # Following POSIX and most shells except bash, -f is in
                # blocks of 512 bytes
                ('-c', RLIMIT_CORE, 512, 'core dump size'),
                ('-d', RLIMIT_DATA, 1024, 'data segment size'),
                ('-f', RLIMIT_FSIZE, 512, 'file size'),
                ('-n', RLIMIT_NOFILE, 1, 'file descriptors'),
                ('-s', RLIMIT_STACK, 1024, 'stack size'),
                ('-t', RLIMIT_CPU, 1, 'CPU seconds'),
                ('-v', RLIMIT_AS, 1024, 'address space size'),
            ]

        return self._table
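    # Worked example (a sketch), using the factor column above: 'ulimit -f 2'
    # requests an RLIMIT_FSIZE of 2 * 512 = 1024 bytes, and 'ulimit -c'
    # reports the core dump limit in 512-byte blocks.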

    def _FindFactor(self, what):
        # type: (int) -> int
        for _, w, factor, _ in self._Table():
            if w == what:
                return factor
        raise AssertionError()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('ulimit', cmd_val)
        arg = arg_types.ulimit(attrs.attrs)

        what = 0
        num_what_flags = 0

        if arg.c:
            what = RLIMIT_CORE
            num_what_flags += 1

        if arg.d:
            what = RLIMIT_DATA
            num_what_flags += 1

        if arg.f:
            what = RLIMIT_FSIZE
            num_what_flags += 1

        if arg.n:
            what = RLIMIT_NOFILE
            num_what_flags += 1

        if arg.s:
            what = RLIMIT_STACK
            num_what_flags += 1

        if arg.t:
            what = RLIMIT_CPU
            num_what_flags += 1

        if arg.v:
            what = RLIMIT_AS
            num_what_flags += 1

        if num_what_flags > 1:
            raise error.Usage(
                'can only handle one resource at a time; got too many flags',
                cmd_val.arg_locs[0])

        # Print all
        show_all = arg.a or arg.all
        if show_all:
            if num_what_flags > 0:
                raise error.Usage("doesn't accept resource flags with -a",
                                  cmd_val.arg_locs[0])

            extra, extra_loc = arg_r.Peek2()
            if extra is not None:
                raise error.Usage('got extra arg with -a', extra_loc)

            # Worst case 20 == len(str(2**64))
            fmt = '%5s %15s %15s %7s %s'
            print(fmt % ('FLAG', 'SOFT', 'HARD', 'FACTOR', 'DESC'))
            for flag, what, factor, desc in self._Table():
                soft, hard = pyos.GetRLimit(what)

                soft2 = _LimitString(soft, factor)
                hard2 = _LimitString(hard, factor)
                print(fmt % (flag, soft2, hard2, str(factor), desc))

            return 0

        if num_what_flags == 0:
            what = RLIMIT_FSIZE  # -f is the default

        s, s_loc = arg_r.Peek2()

        if s is None:
            factor = self._FindFactor(what)
            soft, hard = pyos.GetRLimit(what)
            if arg.H:
                print(_LimitString(hard, factor))
            else:
                print(_LimitString(soft, factor))
            return 0

        # Set the given resource
        if s == 'unlimited':
            # In C, RLIM_INFINITY is rlim_t
            limit = mops.FromC(RLIM_INFINITY)
        else:
            if match.LooksLikeInteger(s):
                ok, big_int = mops.FromStr2(s)
                if not ok:
                    raise error.Usage('Integer too big: %s' % s, s_loc)
            else:
                raise error.Usage(
                    "expected a number or 'unlimited', got %r" % s, s_loc)

            if mops.Greater(mops.IntWiden(0), big_int):
                raise error.Usage(
                    "doesn't accept negative numbers, got %r" % s, s_loc)

            factor = self._FindFactor(what)

            fac = mops.IntWiden(factor)
            limit = mops.Mul(big_int, fac)

            # Overflow check like bash does
            # TODO: This should be replaced with a different overflow check
            # when we have arbitrary precision integers
            if not mops.Equal(mops.Div(limit, fac), big_int):
                #log('div %s', mops.ToStr(mops.Div(limit, fac)))
                raise error.Usage(
                    'detected integer overflow: %s' % mops.ToStr(big_int),
                    s_loc)

        arg_r.Next()
        extra2, extra_loc2 = arg_r.Peek2()
        if extra2 is not None:
            raise error.Usage('got extra arg', extra_loc2)

        # Now set the resource
        soft, hard = pyos.GetRLimit(what)

        # For error message
        old_soft = soft
        old_hard = hard

        # Bash behavior: manipulate both, unless a flag is parsed.  This
        # differs from zsh!
        if not arg.S and not arg.H:
            soft = limit
            hard = limit
        if arg.S:
            soft = limit
        if arg.H:
            hard = limit
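        # e.g. (a sketch): 'ulimit -n 1024' sets both the soft and hard limit,
        # while 'ulimit -S -n 1024' changes only the soft limit.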

        if mylib.PYTHON:
            try:
                pyos.SetRLimit(what, soft, hard)
            except OverflowError:  # only happens in CPython
                raise error.Usage('detected overflow', s_loc)
            except (ValueError, resource.error) as e:
                # Annoying: Python binding changes IOError -> ValueError

                print_stderr('oils: ulimit error: %s' % e)

                # Extra info we could expose in C++ too
                print_stderr('soft=%s hard=%s -> soft=%s hard=%s' % (
                    _LimitString(old_soft, factor),
                    _LimitString(old_hard, factor),
                    _LimitString(soft, factor),
                    _LimitString(hard, factor),
                ))
                return 1
        else:
            try:
                pyos.SetRLimit(what, soft, hard)
            except (IOError, OSError) as e:
                print_stderr('oils: ulimit error: %s' % pyutil.strerror(e))
                return 1

        return 0


# vim: sw=4