# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
Miscellaneous tools used by OpenERP.
"""
import cProfile
import collections
import datetime
import hmac as hmac_lib
import hashlib
import io
import logging
import os
import pickle as pickle_
import re
import socket
import subprocess
import sys
import tempfile
import threading
import time
import traceback
import types
import unicodedata
import zipfile
from collections import defaultdict, OrderedDict
from collections.abc import Iterable, Mapping, MutableMapping, MutableSet
from contextlib import contextmanager
from difflib import HtmlDiff
from functools import wraps
from itertools import islice, groupby as itergroupby
from operator import itemgetter
import babel
import babel.dates
import passlib.utils
import pytz
from lxml import etree
import odoo
import odoo.addons
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# They were moved to loglevels until we refactor tools.
from odoo.loglevels import get_encodings, ustr, exception_to_unicode # noqa
from . import pycompat
from .cache import *
from .config import config
from .parse_version import parse_version
from .which import which
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase, etree._Entity)
# Configure default global parser
etree.set_default_parser(etree.XMLParser(resolve_entities=False))
NON_BREAKING_SPACE = u'\N{NO-BREAK SPACE}'
#----------------------------------------------------------
# Subprocesses
#----------------------------------------------------------
def find_in_path(name):
path = os.environ.get('PATH', os.defpath).split(os.pathsep)
if config.get('bin_path') and config['bin_path'] != 'None':
path.append(config['bin_path'])
return which(name, path=os.pathsep.join(path))
def _exec_pipe(prog, args, env=None):
cmd = (prog,) + args
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
close_fds = os.name=="posix"
pop = subprocess.Popen(cmd, bufsize=-1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=close_fds, env=env)
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Command `%s` not found.' % name)
return _exec_pipe(prog, args)
#----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
"""
Force the database PostgreSQL environment variables to the database
configuration of Odoo.
Note: On systems where pg_restore/pg_dump require an explicit password
(i.e. on Windows where TCP sockets are used), it is necessary to pass the
postgres user password in the PGPASSWORD environment variable or in a
special .pgpass file.
See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
"""
env = os.environ.copy()
if odoo.tools.config['db_host']:
env['PGHOST'] = odoo.tools.config['db_host']
if odoo.tools.config['db_port']:
env['PGPORT'] = str(odoo.tools.config['db_port'])
if odoo.tools.config['db_user']:
env['PGUSER'] = odoo.tools.config['db_user']
if odoo.tools.config['db_password']:
env['PGPASSWORD'] = odoo.tools.config['db_password']
return env
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
args2 = (prog,) + args
rc = subprocess.call(args2, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
if rc:
raise Exception('Postgres subprocess %s error %s' % (args2, rc))
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
return _exec_pipe(prog, args, env)
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False, filter_ext=None, env=None):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
>>> file_open('hr/report/timesheet.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@param filter_ext: optional list of supported extensions (without leading dot)
@param env: optional environment, required for a file path within a temporary directory
created using `file_open_temporary_directory()`
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
adps = odoo.addons.__path__
rtp = os.path.normcase(os.path.abspath(config['root_path']))
if env and hasattr(env.all, '__file_open_tmp_paths'):
adps = adps + list(env.all.__file_open_tmp_paths)
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename, filter_ext=filter_ext,
env=env)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename, filter_ext=filter_ext, env=env)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename, filter_ext=filter_ext, env=env)
def _fileopen(path, mode, basedir, pathinfo, basename=None, filter_ext=None, env=None):
name = os.path.normpath(os.path.normcase(os.path.join(basedir, path)))
paths = odoo.addons.__path__ + [config['root_path']]
if env and hasattr(env.all, '__file_open_tmp_paths'):
paths += env.all.__file_open_tmp_paths
for addons_path in paths:
addons_path = os.path.normpath(os.path.normcase(addons_path)) + os.sep
if name.startswith(addons_path):
break
else:
raise ValueError("Unknown path: %s" % name)
if filter_ext and not name.lower().endswith(filter_ext):
raise ValueError("Unsupported path: %s" % name)
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
if 'b' in mode:
fo = open(name, mode)
else:
fo = io.open(name, mode, encoding='utf-8')
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
zfile = zipfile.ZipFile(zpath)
try:
fo = io.BytesIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
raise IOError('Report %r does not exist or has been deleted' % basename)
raise IOError('File not found: %s' % basename)
@contextmanager
def file_open_temporary_directory(env):
"""Create and return a temporary directory added to the directories `file_open` is allowed to read from.
`file_open` will be allowed to open files within the temporary directory
only for environments of the same transaction as `env`.
Meaning, other transactions/requests from other users or even other databases
won't be allowed to open files from this directory.
Examples::
>>> with odoo.tools.file_open_temporary_directory(self.env) as module_dir:
... with zipfile.ZipFile('foo.zip', 'r') as z:
... z.extract('foo/__manifest__.py', module_dir)
... with odoo.tools.file_open('foo/__manifest__.py', env=self.env) as f:
... manifest = f.read()
:param env: environment for which the temporary directory is created.
:return: the absolute path to the created temporary directory
"""
assert not hasattr(env.all, '__file_open_tmp_paths'), 'Reentrancy is not implemented for this method'
with tempfile.TemporaryDirectory() as module_dir:
try:
env.all.__file_open_tmp_paths = (module_dir,)
yield module_dir
finally:
del env.all.__file_open_tmp_paths
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a unique list
Author: Christophe Simonis ([email protected])
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
r = []
for e in list:
if isinstance(e, (bytes, str)) or not isinstance(e, collections.abc.Iterable):
r.append(e)
else:
r.extend(flatten(e))
return r
def reverse_enumerate(l):
"""Like enumerate but in the other direction
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> next(it)
(2, 'c')
>>> next(it)
(1, 'b')
>>> next(it)
(0, 'a')
>>> next(it)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return zip(range(len(l)-1, -1, -1), reversed(l))
def partition(pred, elems):
""" Return a pair equivalent to:
``filter(pred, elems), filter(lambda x: not pred(x), elems)`` """
yes, nos = [], []
for elem in elems:
(yes if pred(elem) else nos).append(elem)
return yes, nos
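# A minimal usage sketch (added for illustration, not from the original source):
# a single pass preserves the input order within each of the two returned lists.
#   partition(lambda x: x % 2 == 0, range(5))  ->  ([0, 2, 4], [1, 3])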
def topological_sort(elems):
""" Return a list of elements sorted so that their dependencies are listed
before them in the result.
:param elems: specifies the elements to sort with their dependencies; it is
a dictionary like `{element: dependencies}` where `dependencies` is a
collection of elements that must appear before `element`. The elements
of `dependencies` are not required to appear in `elems`; they will
simply not appear in the result.
:returns: a list with the keys of `elems` sorted according to their
specification.
"""
# the algorithm is inspired by [Tarjan 1976],
# http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
result = []
visited = set()
def visit(n):
if n not in visited:
visited.add(n)
if n in elems:
# first visit all dependencies of n, then append n to result
for it in elems[n]:
visit(it)
result.append(n)
for el in elems:
visit(el)
return result
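# Usage sketch with hypothetical module names (added for illustration): each
# dependency is listed before its dependents, and dependencies that are not keys
# of the mapping ('account' below) are silently omitted from the result.
#   topological_sort({'web': ['base'], 'base': [], 'sale': ['web', 'account']})
#   -> ['base', 'web', 'sale']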
def merge_sequences(*iterables):
""" Merge several iterables into a list. The result is the union of the
iterables, ordered following the partial order given by the iterables,
with a bias towards the end for the last iterable::
seq = merge_sequences(['A', 'B', 'C'])
assert seq == ['A', 'B', 'C']
seq = merge_sequences(
['A', 'B', 'C'],
['Z'], # 'Z' can be anywhere
['Y', 'C'], # 'Y' must precede 'C';
['A', 'X', 'Y'], # 'X' must follow 'A' and precede 'Y'
)
assert seq == ['A', 'B', 'X', 'Y', 'C', 'Z']
"""
# we use an OrderedDict to keep elements in order by default
deps = OrderedDict() # {item: elems_before_item}
for iterable in iterables:
prev = None
for index, item in enumerate(iterable):
if not index:
deps.setdefault(item, [])
else:
deps.setdefault(item, []).append(prev)
prev = item
return topological_sort(deps)
try:
import xlwt
# add some sanitization to respect the excel sheet name restrictions
# as the sheet name is often translatable, we cannot control the input
class PatchedWorkbook(xlwt.Workbook):
def add_sheet(self, name, cell_overwrite_ok=False):
# invalid Excel character: []:*?/\
name = re.sub(r'[\[\]:*?/\\]', '', name)
# maximum size is 31 characters
name = name[:31]
return super(PatchedWorkbook, self).add_sheet(name, cell_overwrite_ok=cell_overwrite_ok)
xlwt.Workbook = PatchedWorkbook
except ImportError:
xlwt = None
try:
import xlsxwriter
# add some sanitization to respect the excel sheet name restrictions
# as the sheet name is often translatable, we cannot control the input
class PatchedXlsxWorkbook(xlsxwriter.Workbook):
# TODO when xlsxwriter bumps to 0.9.8, add worksheet_class=None parameter instead of kw
def add_worksheet(self, name=None, **kw):
if name:
# invalid Excel character: []:*?/\
name = re.sub(r'[\[\]:*?/\\]', '', name)
# maximum size is 31 characters
name = name[:31]
return super(PatchedXlsxWorkbook, self).add_worksheet(name, **kw)
xlsxwriter.Workbook = PatchedXlsxWorkbook
except ImportError:
xlsxwriter = None
def to_xml(s):
return s.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
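# Illustrative examples (not part of the original source): the country suffix is
# dropped only when it merely repeats the language code.
#   get_iso_codes('fr_FR') -> 'fr'
#   get_iso_codes('fr_BE') -> 'fr_BE'
#   get_iso_codes('en')    -> 'en'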
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, str)]
"""
csvpath = odoo.modules.module.get_resource_path('base', 'data', 'res.lang.csv')
try:
# read (code, name) from languages in base/data/res.lang.csv
with open(csvpath, 'rb') as csvfile:
reader = pycompat.csv_reader(csvfile, delimiter=',', quotechar='"')
fields = next(reader)
code_index = fields.index("code")
name_index = fields.index("name")
result = [
(row[code_index], row[name_index])
for row in reader
]
except Exception:
_logger.error("Could not read %s", csvpath)
result = []
return sorted(result or [('en_US', u'English')], key=itemgetter(1))
def mod10r(number):
"""
Input number : account or invoice number
Output return: the same number completed with the recursive mod10
key
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
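# Illustrative example (computed from the codec table above, not from the
# original source):
#   mod10r('0001') -> '00011'   # '1' is the recursive modulo-10 check digit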
def str2bool(s, default=None):
s = ustr(s).lower()
y = 'y yes 1 true t on'.split()
n = 'n no 0 false f off'.split()
if s not in (y + n):
if default is None:
raise ValueError('Use 0/1/yes/no/true/false/on/off')
return bool(default)
return s in y
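# Usage sketch (added for illustration):
#   str2bool('Yes')   -> True
#   str2bool('off')   -> False
#   str2bool('maybe', default=False) -> False
#   str2bool('maybe') -> raises ValueError('Use 0/1/yes/no/true/false/on/off')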
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb', 'Tb')
if isinstance(sz, str):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
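# A minimal usage sketch (hypothetical function, added for illustration): the
# decorator logs the call, its arguments, its result and the elapsed time at
# DEBUG level on this module's logger.
#   @logged
#   def add(a, b):
#       return a + b
#   add(1, 2)  # returns 3 and emits one DEBUG record describing the call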
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.__name__,)))
return result
return wrapper
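# Usage sketch (hypothetical output path, added for illustration): cProfile
# statistics are dumped on every call of the decorated function.
#   @profile('/tmp/compute.cprof')
#   def compute():
#       ...
#   compute()  # writes /tmp/compute.cprof; with @profile(), '<funcname>.cprof' is used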
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
for binding to an interface, but it could be used as basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', b'\0' * nbytes)
outbytes = unpack('iL', fcntl.ioctl(s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tobytes()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split(b'\0', 1)[0].decode()
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = [namestr[i:i+32].split(b'\0', 1)[0].decode() for i in range(0, outbytes, 32)]
for ifname in [iface for iface in ifaces if iface and iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15].encode()))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
DATE_LENGTH = len(datetime.date.today().strftime(DEFAULT_SERVER_DATE_FORMAT))
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
# there is no tzinfo in a datetime value (e.g. when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
POSIX_TO_LDML = {
'a': 'E',
'A': 'EEEE',
'b': 'MMM',
'B': 'MMMM',
#'c': '',
'd': 'dd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'M': 'mm',
'p': 'a',
'S': 'ss',
'U': 'w',
'w': 'e',
'W': 'w',
'y': 'yy',
'Y': 'yyyy',
# see comments above, and babel's format_datetime assumes a UTC timezone
# for naive datetime objects
#'z': 'Z',
#'Z': 'z',
}
def posix_to_ldml(fmt, locale):
""" Converts a posix/strftime pattern into an LDML date format pattern.
:param fmt: non-extended C89/C90 strftime pattern
:param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
:return: unicode
"""
buf = []
pc = False
quoted = []
for c in fmt:
# LDML date format patterns use letters, so letters must be quoted
if not pc and c.isalpha():
quoted.append(c if c != "'" else "''")
continue
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
quoted = []
if pc:
if c == '%': # escaped percent
buf.append('%')
elif c == 'x': # date format, short seems to match
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
else: # look up format char in static mapping
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
pc = True
else:
buf.append(c)
# flush anything remaining in quoted buffer
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
return ''.join(buf)
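# Illustrative conversion (the locale value is an assumption, not from the
# original source); literal separators pass through unquoted:
#   import babel
#   posix_to_ldml('%Y-%m-%d %H:%M:%S', locale=babel.Locale.parse('en_US'))
#   -> 'yyyy-MM-dd HH:mm:ss'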
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
:param int n: maximum size of each generated chunk
:param Iterable iterable: iterable to chunk into pieces
:param piece_maker: callable taking an iterable and collecting each
chunk from its slice, *must consume the entire slice*.
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
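# Usage sketch (added for illustration): pieces are tuples by default, and the
# last one may be shorter.
#   list(split_every(3, range(8)))                  -> [(0, 1, 2), (3, 4, 5), (6, 7)]
#   list(split_every(2, 'abcd', piece_maker=list))  -> [['a', 'b'], ['c', 'd']]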
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
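# Illustrative examples using the standard library (not from the original source):
#   attrgetter('year', 'month')(datetime.date(2020, 3, 14))   -> (2020, 3)
#   attrgetter('resolution.days')(datetime.date(2020, 3, 14)) -> 1   # dotted lookup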
# ---------------------------------------------
# String management
# ---------------------------------------------
# Inspired by http://stackoverflow.com/questions/517923
def remove_accents(input_str):
"""Suboptimal-but-better-than-nothing way to replace accented
latin letters by an ASCII equivalent. Will obviously change the
meaning of input_str and work only for some cases"""
if not input_str:
return input_str
input_str = ustr(input_str)
nkfd_form = unicodedata.normalize('NFKD', input_str)
return u''.join([c for c in nkfd_form if not unicodedata.combining(c)])
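# Illustrative example (not part of the original source):
#   remove_accents('Café Noël') -> 'Cafe Noel'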
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
or escaping, keeping the original string untouched. The name come from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
Some examples (notice that there are never quotes surrounding
the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print(d)
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('odoo.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('odoo.foo.bar'):
do_suff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
assert isinstance(logger, str),\
"A logger name must be a string, got %s" % type(logger)
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
__next__ = next
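# A minimal usage sketch (added for illustration): the default start of -1 means
# the index of the first yielded element is 0.
#   stream = CountingStream(['a', 'b'])
#   next(stream), stream.index  -> ('a', 0)
#   next(stream), stream.index  -> ('b', 1)
#   # once exhausted, stream.index is 2, i.e. one past the last element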
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-u', '--update', '-i', '--init', '--i18n-overwrite']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in itergroupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
"""
An immutable mapping returning the provided value for every single key.
Useful for default value to methods
"""
__slots__ = ['_value']
def __init__(self, val):
self._value = val
def __len__(self):
"""
defaultdict updates its length for each individually requested key, is
that really useful?
"""
return 0
def __iter__(self):
"""
same as len, defaultdict updates its iterable keyset with each key
requested, is there a point for this?
"""
return iter([])
def __getitem__(self, item):
return self._value
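# Usage sketch (added for illustration): every key maps to the same value, while
# the mapping still reports an empty length and key set.
#   cm = ConstantMapping(42)
#   cm['anything'] -> 42
#   len(cm)        -> 0
#   list(cm)       -> []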
def dumpstacks(sig=None, frame=None, thread_idents=None):
""" Signal handler: dump a stack trace for each existing thread or given
thread(s) specified through the ``thread_idents`` sequence.
"""
code = []
def extract_stack(stack):
for filename, lineno, name, line in traceback.extract_stack(stack):
yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
if line:
yield " %s" % (line.strip(),)
# code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
# modified for python 2.5 compatibility
threads_info = {th.ident: {'repr': repr(th),
'uid': getattr(th, 'uid', 'n/a'),
'dbname': getattr(th, 'dbname', 'n/a'),
'url': getattr(th, 'url', 'n/a')}
for th in threading.enumerate()}
for threadId, stack in sys._current_frames().items():
if not thread_idents or threadId in thread_idents:
thread_info = threads_info.get(threadId, {})
code.append("\n# Thread: %s (db:%s) (uid:%s) (url:%s)" %
(thread_info.get('repr', threadId),
thread_info.get('dbname', 'n/a'),
thread_info.get('uid', 'n/a'),
thread_info.get('url', 'n/a')))
for line in extract_stack(stack):
code.append(line)
if odoo.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
from greenlet import greenlet