Each row of the dataset carries the following fields, in this order:

| Column | Type | Range / length |
|---|---|---|
| ast_errors | string | lengths 0–3.2k |
| d_id | int64 | 44–121k |
| id | int64 | 70–338k |
| n_whitespaces | int64 | 3–14k |
| path | string | lengths 8–134 |
| n_words | int64 | 4–4.82k |
| n_identifiers | int64 | 1–131 |
| random_cut | string | lengths 16–15.8k |
| commit_message | string | lengths 2–15.3k |
| fun_name | string | lengths 1–84 |
| commit_id | string | lengths 40–40 (git SHA) |
| repo | string | lengths 3–28 |
| file_name | string | lengths 5–79 |
| ast_levels | int64 | 6–31 |
| nloc | int64 | 1–548 |
| url | string | lengths 31–59 |
| complexity | int64 | 1–66 |
| token_counts | int64 | 6–2.13k |
| n_ast_errors | int64 | 0–28 |
| vocab_size | int64 | 4–1.11k |
| n_ast_nodes | int64 | 15–19.2k |
| language | string | 1 class (Python) |
| documentation | dict | docstring, language, n_whitespaces, n_words, vocab_size |
| code | string | lengths 101–62.2k |

The preview rows below follow this column order, pipe-separated; multi-line cells (code, commit messages, docstring dicts) wrap across several lines, and long cells are truncated with "...".
@serializable
@register | 53,110 | 211,506 | 881 | ppdet/modeling/losses/probiou_loss.py | 181 | 46 | def probiou_loss(pred, target, eps=1e-3, mode='l1'):
gbboxes1 = gbb_form(pred)
gbboxes2 = gbb_form(target)
x1, y1, a1_, b1_, c1_ = gbboxes1[:,
| add fcosr model (#6765)
* add fcosr
* fix some problem
* add docs for fcosr
* modify code
* modify focsr reader
* finish tensorrt deployment with dynamic shape
* modify according to review comment
Co-authored-by: wangxinxin08 <> | probiou_loss | 92078713cced4f0d9450a6fc80a449fa75fd8c10 | PaddleDetection | probiou_loss.py | 17 | 32 | https://github.com/PaddlePaddle/PaddleDetection.git | 3 | 383 | 1 | 95 | 553 | Python | {
"docstring": "\n pred -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours predicted box ;in case of HBB angle == 0\n target -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours target box ;in case of HBB angle == 0\n eps -> threshold to avoid infinite values\n ... | def probiou_loss(pred, target, eps=1e-3, mode='l1'):
gbboxes1 = gbb_form(pred)
gbboxes2 = gbb_form(target)
x1, y1, a1_, b1_, c1_ = gbboxes1[:,
0], gbboxes1[:,
1], gbboxes1[:,
... |
42,834 | 178,818 | 20 | nuitka/Options.py | 11 | 3 | def mayDisableConsoleWindow():
# TODO: What about | Standalone: Added support for requiring modes
* For wx on macOS, console must be disabled, avoid the trap.
* For the PySide2, on macOS the --onefile must be used when the
application bundle is built or else signing has issues.
* Recommend to use new option --disable-console for PySide2, PySide6
and wx on non-macO... | mayDisableConsoleWindow | 613c31d98f20bdd9a4e5884c99826a06a3328438 | Nuitka | Options.py | 8 | 2 | https://github.com/Nuitka/Nuitka.git | 2 | 13 | 0 | 11 | 27 | Python | {
"docstring": ":returns: bool derived from platform support of disabling the console,",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def mayDisableConsoleWindow():
# TODO: What about MSYS2?
return isWin32Windows() or isMacOS()
| |
70,277 | 244,197 | 34 | mmdet/utils/compat_config.py | 16 | 7 | def compat_cfg(cfg):
cfg = copy.deepcopy(cfg)
cfg = compat_imgs_per_gpu(cfg)
cfg = compat_loader_args(cfg)
cfg = compat_runner_args(cfg)
return cf | [Feature] Support set dataloader args in config and and add function to handle config compatibility (#7668)
* add cfg_compatibility and support loader args
* resolve comments
* add unitest
* resolve comments
* delete all warning | compat_cfg | dc14675f79681b88ce2c5a3ca3c69901b415ffe4 | mmdetection | compat_config.py | 8 | 6 | https://github.com/open-mmlab/mmdetection.git | 1 | 34 | 0 | 9 | 59 | Python | {
"docstring": "This function would modify some filed to keep the compatibility of\n config.\n\n For example, it will move some args which will be deprecated to the correct\n fields.\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 27,
"vocab_size": 23
} | def compat_cfg(cfg):
cfg = copy.deepcopy(cfg)
cfg = compat_imgs_per_gpu(cfg)
cfg = compat_loader_args(cfg)
cfg = compat_runner_args(cfg)
return cfg
| |
55,293 | 218,412 | 93 | python3.10.4/Lib/inspect.py | 44 | 13 | def getsourcelines(object):
object = unwrap(object)
lines, lnum = findsource(object)
if istraceback(object):
object = object.tb_frame
# for module or frame that corresponds to module, return all source lines
if (ismodule(object) or
| add python 3.10.4 for windows | getsourcelines | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | inspect.py | 13 | 10 | https://github.com/XX-net/XX-Net.git | 5 | 73 | 0 | 35 | 123 | Python | {
"docstring": "Return a list of source lines and starting line number for an object.\n\n The argument may be a module, class, method, function, traceback, frame,\n or code object. The source code is returned as a list of the lines\n corresponding to the object and the line number indicates where in the\n ... | def getsourcelines(object):
object = unwrap(object)
lines, lnum = findsource(object)
if istraceback(object):
object = object.tb_frame
# for module or frame that corresponds to module, return all source lines
if (ismodule(object) or
(isframe(object) and object.f_code.co_name ==... | |
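A note on the `getsourcelines` record above: it is part of Python's public `inspect` API, so its behaviour is easy to demonstrate. A minimal sketch (the `demo` function is hypothetical, and the call only works when the object's source file is available, e.g. not in a bare REPL):

```python
import inspect

def demo(a, b):
    return a + b

# Returns the object's source lines plus the line number where
# its definition starts in the containing file.
lines, start = inspect.getsourcelines(demo)
print(start)
print("".join(lines))
```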
51,131 | 205,453 | 155 | django/db/models/deletion.py | 39 | 16 | def get_del_batches(self, objs, fields):
| Refs #33476 -- Reformatted code with Black. | get_del_batches | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | deletion.py | 13 | 12 | https://github.com/django/django.git | 4 | 82 | 0 | 34 | 123 | Python | {
"docstring": "\n Return the objs in suitably sized batches for the used connection.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 10
} | def get_del_batches(self, objs, fields):
field_names = [field.name for field in fields]
conn_batch_size = max(
connections[self.using].ops.bulk_batch_size(field_names, objs), 1
)
if len(objs) > conn_batch_size:
return [
objs[i : i + conn_b... | |
1,048 | 6,670 | 47 | ludwig/utils/checkpoint_utils.py | 12 | 11 | def save(self, global_step):
save_path = osp.join(s | Add file lock on training checkpoints to prevent race condition (#1938) | save | 44356d2d07370b7044640a068ace95842d5ce98c | ludwig | checkpoint_utils.py | 11 | 5 | https://github.com/ludwig-ai/ludwig.git | 1 | 42 | 0 | 10 | 75 | Python | {
"docstring": "Create a new checkpoint.\n\n Args:\n global_step (int): The iteration number which will be used\n to name the checkpoint.\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 18,
"vocab_size": 17
} | def save(self, global_step):
save_path = osp.join(self.directory, f"{global_step:09d}.ckpt")
self.checkpoint.save(save_path)
self.latest_checkpoint = save_path
self.queue.put(True)
| |
47,301 | 195,586 | 156 | versioneer.py | 52 | 16 | def versions_from_file(filename):
try:
with open(filename) as f:
contents = f.read()
except OSError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = # END | add auto tag | versions_from_file | f0194812568c83585ff09488fe7f67df300938cc | rembg | versioneer.py | 12 | 14 | https://github.com/danielgatis/rembg.git | 4 | 94 | 0 | 34 | 159 | Python | {
"docstring": "Try to determine the version from _version.py if present.\\n(.*)\\r\\n(.*)",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def versions_from_file(filename):
try:
with open(filename) as f:
contents = f.read()
except OSError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo =... | |
13,724 | 64,798 | 3 | erpnext/accounts/doctype/bank_transaction/bank_transaction.py | 9 | 7 | def get_total_allocated_amount(payment_entry):
return frappe.db.sql(
,
(payment_entry.p | style: format code with black | get_total_allocated_amount | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | bank_transaction.py | 9 | 19 | https://github.com/frappe/erpnext.git | 1 | 29 | 0 | 9 | 44 | Python | {
"docstring": "\n\t\tSELECT\n\t\t\tSUM(btp.allocated_amount) as allocated_amount,\n\t\t\tbt.name\n\t\tFROM\n\t\t\t`tabBank Transaction Payments` as btp\n\t\tLEFT JOIN\n\t\t\t`tabBank Transaction` bt ON bt.name=btp.parent\n\t\tWHERE\n\t\t\tbtp.payment_document = %s\n\t\tAND\n\t\t\tbtp.payment_entry = %s\n\t\tAND\n\t\... | def get_total_allocated_amount(payment_entry):
return frappe.db.sql(
,
(payment_entry.payment_document, payment_entry.payment_entry),
as_dict=True,
)
| |
@not_implemented_for("multigraph")
@not_implemented_for("directed") | 41,855 | 176,369 | 82 | networkx/algorithms/matching.py | 53 | 21 | def min_weight_matching(G, maxcardinality=False, weight="weight"):
if len(G.edges) == 0:
return max_weight_matching(G, maxcardinality, weight)
G_edges = G.edges(data=weight, default=1)
min_weight = min(w for _, _, w in G_edges)
InvG = nx.Graph()
edges = ((u, v, 1 / (1 + w - min_weight))... | Update matching functions for error validation and speed (#4897)
* First steps to update matching functions for #4644
Expand tests
Change API to raise NetworkXError when matching involves nodes not in G
Update is_*_matching to 100+ times faster.
* improve matching_dict_to_set and docs for min_weight_matching
... | min_weight_matching | 28b3014d68d2b4e40d3e02219770296a827bd55c | networkx | matching.py | 12 | 9 | https://github.com/networkx/networkx.git | 4 | 114 | 1 | 40 | 191 | Python | {
"docstring": "Computing a minimum-weight maximal matching of G.\n\n Use reciprocal edge weights with the maximum-weight algorithm.\n\n A matching is a subset of edges in which no node occurs more than once.\n The weight of a matching is the sum of the weights of its edges.\n A maximal matching cannot ad... | def min_weight_matching(G, maxcardinality=False, weight="weight"):
if len(G.edges) == 0:
return max_weight_matching(G, maxcardinality, weight)
G_edges = G.edges(data=weight, default=1)
min_weight = min(w for _, _, w in G_edges)
InvG = nx.Graph()
edges = ((u, v, 1 / (1 + w - min_weight))... |
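The docstring of this `min_weight_matching` record explains the reduction: each weight `w` is remapped to `1 / (1 + w - min_weight)` and the problem is handed to `max_weight_matching`. A small illustrative sketch of the public call (graph and weights are made up; the orientation of the returned pairs can vary):

```python
import networkx as nx

G = nx.Graph()
# A 4-cycle with two cheap edges and two expensive ones.
G.add_weighted_edges_from([(1, 2, 5), (2, 3, 1), (3, 4, 5), (1, 4, 1)])

# Picks the two weight-1 edges over the two weight-5 edges.
print(nx.min_weight_matching(G))  # e.g. {(2, 3), (1, 4)}
```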
3,270 | 20,218 | 20 | pipenv/patched/notpip/_vendor/platformdirs/macos.py | 6 | 4 | def site_config_dir(self) -> str:
return self._append_app_name_and_version("/Libr | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | site_config_dir | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | macos.py | 8 | 3 | https://github.com/pypa/pipenv.git | 1 | 15 | 0 | 6 | 29 | Python | {
"docstring": ":return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def site_config_dir(self) -> str:
return self._append_app_name_and_version("/Library/Preferences")
| |
36,095 | 154,585 | 310 | modin/experimental/core/execution/native/implementations/hdk_on_native/expr.py | 99 | 18 | def _cmp_op(self, other, op_name):
lhs_dtype_class = self._get_dtype_cmp_class(self._dtype)
rhs_dtype_class = self._get_dtype_cmp_class(other._dtype)
res_dtype = get_dtype(bool)
# In HDK comparison with NULL always results in NULL,
# but in pandas it is True for 'ne' com... | FEAT-#4946: Replace OmniSci with HDK (#4947)
Co-authored-by: Iaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: Andrey Pavlenko <andrey.a.pavlenko@gmail.com> | _cmp_op | e5b1888cd932909e49194d58035da34b210b91c4 | modin | expr.py | 15 | 16 | https://github.com/modin-project/modin.git | 4 | 106 | 0 | 70 | 192 | Python | {
"docstring": "\n Build a comparison expression.\n\n Parameters\n ----------\n other : BaseExpr\n A value to compare with.\n op_name : str\n The comparison operation name.\n\n Returns\n -------\n BaseExpr\n The resulting compari... | def _cmp_op(self, other, op_name):
lhs_dtype_class = self._get_dtype_cmp_class(self._dtype)
rhs_dtype_class = self._get_dtype_cmp_class(other._dtype)
res_dtype = get_dtype(bool)
# In HDK comparison with NULL always results in NULL,
# but in pandas it is True for 'ne' com... | |
23,965 | 110,191 | 249 | lib/matplotlib/widgets.py | 47 | 27 | def set_active(self, index):
if index not in range(len(self.labels)):
raise ValueError(f'Invalid CheckButton index: {index}')
if colors.same_color(
self._ | Use scatter for check boxes instead of Rectangle
With the current implementation, the boxes get stretched into rectangles
if the aspect ratio is not maintained. To overcome this, the boxes are
now created using scatter instead to maintain their shapes. | set_active | 723cd86d7d7bdc14a4d3fc0e08c3a01e72d310b6 | matplotlib | widgets.py | 17 | 18 | https://github.com/matplotlib/matplotlib.git | 8 | 174 | 0 | 37 | 295 | Python | {
"docstring": "\n Toggle (activate or deactivate) a check button by index.\n\n Callbacks will be triggered if :attr:`eventson` is True.\n\n Parameters\n ----------\n index : int\n Index of the check button to toggle.\n\n Raises\n ------\n ValueError\... | def set_active(self, index):
if index not in range(len(self.labels)):
raise ValueError(f'Invalid CheckButton index: {index}')
if colors.same_color(
self._crosses.get_facecolor()[index], colors.to_rgba("none")
):
self._crosses.get_facecolor()[inde... | |
103,215 | 304,408 | 75 | homeassistant/components/ebox/sensor.py | 17 | 8 | async def async_update(self) -> None:
await s | Improve entity type hints [e] (#77041) | async_update | 3a3f41f3df932368791d3ee3f5fbae5fb3b38bfe | core | sensor.py | 13 | 7 | https://github.com/home-assistant/core.git | 2 | 50 | 0 | 17 | 82 | Python | {
"docstring": "Get the latest data from EBox and update the state.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | async def async_update(self) -> None:
await self.ebox_data.async_update()
if self.entity_description.key in self.ebox_data.data:
self._attr_native_value = round(
self.ebox_data.data[self.entity_description.key], 2
)
| |
70,312 | 244,297 | 474 | tools/analysis_tools/analyze_results.py | 102 | 49 | def panoptic_evaluate(self, dataset, results, topk=20):
# image to annotations
gt_json = dataset.coco.img_ann_map
result_files, tmp_dir = dataset.format_results(results)
pred_json = mmcv.load(result_files['panoptic'])['annotations']
pred_folder = osp.join(tmp_dir.name, ... | [Feature] Support panoptic segmentation result analysis (#7922)
* support analyze panoptic segmentation result
* fix lint
* update docstring
* update docstring
* set print_log=False by default
* update
* fix bug 8035 | panoptic_evaluate | f3a451abab8fc89810b317ca0a88ee9fd12cb0c2 | mmdetection | analyze_results.py | 13 | 34 | https://github.com/open-mmlab/mmdetection.git | 3 | 248 | 0 | 80 | 399 | Python | {
"docstring": "Evaluation for panoptic segmentation.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n results (list): Panoptic segmentation results from test\n results pkl file.\n topk (int): Number of the highest topk and\n lowest topk after e... | def panoptic_evaluate(self, dataset, results, topk=20):
# image to annotations
gt_json = dataset.coco.img_ann_map
result_files, tmp_dir = dataset.format_results(results)
pred_json = mmcv.load(result_files['panoptic'])['annotations']
pred_folder = osp.join(tmp_dir.name, ... | |
76,702 | 261,234 | 298 | sklearn/feature_selection/_mutual_info.py | 113 | 37 | def _compute_mi_cd(c, d, n_neighbors):
n_samples = c.shape[0]
c = c.reshape((-1, 1))
radius = np.empty(n_samples)
label_counts = np.empty(n_samples)
k_all = np.empty(n_samples)
nn = NearestNeighbors()
for label in np.unique(d):
mask = d == label
count = np.sum(mask)
... | CLN Remove unnecessary operation in mutual_info (#24569) | _compute_mi_cd | c22be1defcf3e59ebd79ed3e479ada8ea558f601 | scikit-learn | _mutual_info.py | 14 | 34 | https://github.com/scikit-learn/scikit-learn.git | 3 | 270 | 0 | 69 | 422 | Python | {
"docstring": "Compute mutual information between continuous and discrete variables.\n\n Parameters\n ----------\n c : ndarray, shape (n_samples,)\n Samples of a continuous random variable.\n\n d : ndarray, shape (n_samples,)\n Samples of a discrete random variable.\n\n n_neighbors : int... | def _compute_mi_cd(c, d, n_neighbors):
n_samples = c.shape[0]
c = c.reshape((-1, 1))
radius = np.empty(n_samples)
label_counts = np.empty(n_samples)
k_all = np.empty(n_samples)
nn = NearestNeighbors()
for label in np.unique(d):
mask = d == label
count = np.sum(mask)
... | |
50,251 | 203,216 | 47 | django/core/management/base.py | 15 | 5 | def handle_app_config(self, app_config, **options):
raise Not | Refs #33476 -- Refactored problematic code before reformatting by Black.
In these cases Black produces unexpected results, e.g.
def make_random_password(
self,
length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789',
):
or
cursor.execute("""
SELECT ...
""",
... | handle_app_config | c5cd8783825b5f6384417dac5f3889b4210b7d08 | django | base.py | 8 | 4 | https://github.com/django/django.git | 1 | 16 | 0 | 15 | 29 | Python | {
"docstring": "\n Perform the command's actions for app_config, an AppConfig instance\n corresponding to an application label given on the command line.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 17
} | def handle_app_config(self, app_config, **options):
raise NotImplementedError(
"Subclasses of AppCommand must provide a handle_app_config() method."
)
| |
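For context on the `handle_app_config` record: it is the hook that `AppCommand` subclasses override, exactly as the `NotImplementedError` message states. A minimal hypothetical subclass (it would live in an app's `management/commands/` directory):

```python
from django.core.management.base import AppCommand

class Command(AppCommand):
    help = "Report the models defined in each app label given on the command line."

    def handle_app_config(self, app_config, **options):
        # Called once per AppConfig; the returned string is printed.
        names = ", ".join(m.__name__ for m in app_config.get_models())
        return f"{app_config.label}: {names}"
```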
@pytest.mark.parametrize(
"max_features",
[lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)],
) | 75,720 | 259,339 | 70 | sklearn/feature_selection/tests/test_from_model.py | 38 | 23 | def test_inferred_max_features_integer(max_features):
clf = RandomForestClassifier(n_estimators=5, random_state=0)
transformer = SelectFromModel(
estimator=clf, max_features=max_features, threshold=-np.inf
)
X_trans = transformer.fit_transform(data, y)
assert transformer.max_features_ =... | ENH Allow `SelectFromModel`'s `max_features` to accept callables (#22356)
* Initial implementation
* Improved error handling and stability
* Added unit tests
* Updated test to use `max_features_` instead of `max_features`
* Added documentation for new private attribute `max_features_`
* Improved error h... | test_inferred_max_features_integer | db24a30bd3b90a9d55e82e450631de96305744f7 | scikit-learn | test_from_model.py | 12 | 8 | https://github.com/scikit-learn/scikit-learn.git | 1 | 64 | 1 | 29 | 162 | Python | {
"docstring": "Check max_features_ and output shape for integer max_features.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def test_inferred_max_features_integer(max_features):
clf = RandomForestClassifier(n_estimators=5, random_state=0)
transformer = SelectFromModel(
estimator=clf, max_features=max_features, threshold=-np.inf
)
X_trans = transformer.fit_transform(data, y)
assert transformer.max_features_ =... |
42,228 | 177,016 | 40 | networkx/algorithms/tests/test_lowest_common_ancestors.py | 12 | 12 | def test_naive_all_pairs_lowest_common_ancestor3(self):
all_pairs = product(self.DG.nodes(), self.DG.nodes())
ans = naive_all_pairs_lca(self.DG, pairs=all_pairs)
self.assert_lca_dicts_same(dict(ans), self.gold)
| Naive lowest common ancestor implementation (#5736)
* Add naive lca methods
* Naive algorithm implementation for LCA
* Modify naive lca functions
* Correct parameters of nx.ancestors
* Update lowest_common_ancestors.py
* Parametrize tests
* Apply suggestions from code review
Co-authored-by: Dan Sc... | test_naive_all_pairs_lowest_common_ancestor3 | b2f91c34a23058dd70b41784af0d87890216026a | networkx | test_lowest_common_ancestors.py | 11 | 4 | https://github.com/networkx/networkx.git | 1 | 51 | 0 | 11 | 83 | Python | {
"docstring": "Produces the correct results when all pairs given as a generator.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def test_naive_all_pairs_lowest_common_ancestor3(self):
all_pairs = product(self.DG.nodes(), self.DG.nodes())
ans = naive_all_pairs_lca(self.DG, pairs=all_pairs)
self.assert_lca_dicts_same(dict(ans), self.gold)
| |
51,063 | 205,281 | 79 | django/db/migrations/autodetector.py | 22 | 8 | def _resolve_dependency(dependency):
if dependency[0] != "__setting__":
return dependen | Refs #33476 -- Reformatted code with Black. | _resolve_dependency | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | autodetector.py | 11 | 7 | https://github.com/django/django.git | 2 | 54 | 0 | 21 | 89 | Python | {
"docstring": "\n Return the resolved dependency and a boolean denoting whether or not\n it was swappable.\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 14,
"vocab_size": 14
} | def _resolve_dependency(dependency):
if dependency[0] != "__setting__":
return dependency, False
resolved_app_label, resolved_object_name = getattr(
settings, dependency[1]
).split(".")
return (resolved_app_label, resolved_object_name.lower()) + dependenc... | |
@not_implemented_for("undirected")
@not_implemented_for("multigraph") | 42,203 | 176,975 | 54 | networkx/algorithms/lowest_common_ancestors.py | 23 | 11 | def lowest_common_ancestor(G, node1, node2, default=None):
ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)]))
if ans:
assert len(ans) == 1
return ans[0][1]
else:
return default
@not_implemented_for("undirected")
@not_implemented_for("multigraph") | Add examples to lowest common ancestors algorithms (#5531)
* Add examples to lowest common ancestors documentation
* Fix output style of examples
* Fix output style of example
* Update pre-commit
* Update networkx/algorithms/lowest_common_ancestors.py
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu... | lowest_common_ancestor | abaa68779ccb4cce8d1a5ecade622ab96d01edeb | networkx | lowest_common_ancestors.py | 13 | 7 | https://github.com/networkx/networkx.git | 2 | 55 | 1 | 22 | 105 | Python | {
"docstring": "Compute the lowest common ancestor of the given pair of nodes.\n\n Parameters\n ----------\n G : NetworkX directed graph\n\n node1, node2 : nodes in the graph.\n\n default : object\n Returned if no common ancestor between `node1` and `node2`\n\n Returns\n -------\n The l... | def lowest_common_ancestor(G, node1, node2, default=None):
ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)]))
if ans:
assert len(ans) == 1
return ans[0][1]
else:
return default
@not_implemented_for("undirected")
@not_implemented_for("multigraph") |
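The `lowest_common_ancestor` record above is a thin wrapper over `all_pairs_lowest_common_ancestor` for a single pair, with `default` returned when no common ancestor exists. A quick sketch on a made-up DAG:

```python
import networkx as nx

# 0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3
G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3)])
G.add_node(4)  # isolated node: shares no ancestor with the others

print(nx.lowest_common_ancestor(G, 1, 2))              # 0
print(nx.lowest_common_ancestor(G, 1, 4, default=-1))  # -1
```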
14,058 | 65,933 | 16 | erpnext/education/report/program_wise_fee_collection/program_wise_fee_collection.py | 33 | 16 | def get_data(filters=None):
data = []
conditions = get_filter_conditions(filters)
fee_details = frappe.db.sql(
% (conditions),
as_dict=1,
)
for entry in fee_details:
data.append(
{
"program": entry.program,
"fees_collected": entry.paid_amount,
"outstanding_amount": entry.outstanding_amou... | style: format code with black | get_data | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | program_wise_fee_collection.py | 12 | 37 | https://github.com/frappe/erpnext.git | 2 | 74 | 0 | 29 | 121 | Python | {
"docstring": "\n\t\t\tSELECT\n\t\t\t\tFeesCollected.program,\n\t\t\t\tFeesCollected.paid_amount,\n\t\t\t\tFeesCollected.outstanding_amount,\n\t\t\t\tFeesCollected.grand_total\n\t\t\tFROM (\n\t\t\t\tSELECT\n\t\t\t\t\tsum(grand_total) - sum(outstanding_amount) AS paid_amount, program,\n\t\t\t\t\tsum(outstanding_amoun... | def get_data(filters=None):
data = []
conditions = get_filter_conditions(filters)
fee_details = frappe.db.sql(
% (conditions),
as_dict=1,
)
for entry in fee_details:
data.append(
{
"program": entry.program,
"fees_collected": entry.paid_amount,
"outstanding_amount": entry.outstanding_amou... | |
4,271 | 22,227 | 84 | pipenv/vendor/requirementslib/models/dependencies.py | 46 | 15 | def get_dependencies_from_json(ireq):
if ireq.editable or not is_pinned_requirement(ireq):
return
# It is technically possible to parse extras out of the JSON API's
| Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | get_dependencies_from_json | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | dependencies.py | 12 | 18 | https://github.com/pypa/pipenv.git | 6 | 101 | 0 | 40 | 96 | Python | {
"docstring": "Retrieves dependencies for the given install requirement from the json\n api.\n\n :param ireq: A single InstallRequirement\n :type ireq: :class:`~pipenv.patched.pip._internal.req.req_install.InstallRequirement`\n :return: A set of dependency lines for generating new InstallRequirements.\n ... | def get_dependencies_from_json(ireq):
if ireq.editable or not is_pinned_requirement(ireq):
return
# It is technically possible to parse extras out of the JSON API's
# requirement format, but it is such a chore let's just use the simple API.
if ireq.extras:
return
session = re... | |
40,716 | 171,745 | 58 | pandas/core/frame.py | 20 | 12 | def assign(self, **kwargs) -> DataFrame:
r
data = self.copy(deep=None)
for k, v in kwargs.items():
| ENH/TST: expand copy-on-write to assign() method (#50010) | assign | 36dcf519c67a8098572447f7d5a896740fc9c464 | pandas | frame.py | 10 | 66 | https://github.com/pandas-dev/pandas.git | 2 | 48 | 0 | 18 | 75 | Python | {
"docstring": "\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable or Series}\n ... | def assign(self, **kwargs) -> DataFrame:
r
data = self.copy(deep=None)
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
return data
| |
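Since the `assign` record carries the complete method body, a short usage sketch may help. Callables receive the copied frame, so a later keyword can refer to a column created by an earlier one:

```python
import pandas as pd

df = pd.DataFrame({"temp_c": [17.0, 25.0]})
out = df.assign(
    temp_f=lambda d: d["temp_c"] * 9 / 5 + 32,
    temp_k=lambda d: (d["temp_f"] - 32) * 5 / 9 + 273.15,
)
print(out)  # df itself is unchanged; assign returns a new frame
```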
70,305 | 244,278 | 368 | mmdet/models/dense_heads/solo_head.py | 40 | 14 | def resize_feats(self, feats):
out = []
for i in range(len(feats)):
if i == 0:
out.append(
F.interpolate(
feats[0],
size=feats[i + 1].shape[-2:],
mode='bilinear', | [Feature] Support SOLOv2 (#7441)
* solov2 init
* solov2 r18 lightweight
* add model docstrings and reformat the code
* add docstrings to model method
* add solov2 big model config and correct some errors in the docstring
* fix linting issues
* refactor code and configs
* rename variables according... | resize_feats | d18cdb140ef3cb9ed5fdef6f1a815f5836f1b1ab | mmdetection | solo_head.py | 19 | 20 | https://github.com/open-mmlab/mmdetection.git | 4 | 127 | 0 | 29 | 198 | Python | {
"docstring": "Downsample the first feat and upsample last feat in feats.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def resize_feats(self, feats):
out = []
for i in range(len(feats)):
if i == 0:
out.append(
F.interpolate(
feats[0],
size=feats[i + 1].shape[-2:],
mode='bilinear',
... | |
14,672 | 67,940 | 50 | erpnext/stock/report/stock_projected_qty/stock_projected_qty.py | 71 | 17 | def get_bin_list(filters):
conditions = []
if filters.item_code:
cond | style: format code with black | get_bin_list | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | stock_projected_qty.py | 16 | 24 | https://github.com/frappe/erpnext.git | 5 | 107 | 0 | 52 | 181 | Python | {
"docstring": "select item_code, warehouse, actual_qty, planned_qty, indented_qty,\n\t\tordered_qty, reserved_qty, reserved_qty_for_production, reserved_qty_for_sub_contract, projected_qty\n\t\tfrom tabBin bin {conditions} order by item_code, warehouse\n\t\t",
"language": "en",
"n_whitespaces": 16,
"n_words": ... | def get_bin_list(filters):
conditions = []
if filters.item_code:
conditions.append("item_code = '%s' " % filters.item_code)
if filters.warehouse:
warehouse_details = frappe.db.get_value(
"Warehouse", filters.warehouse, ["lft", "rgt"], as_dict=1
)
if warehouse_details:
conditions.append(
" exists... | |
80,828 | 271,613 | 373 | keras/engine/training.py | 113 | 9 | def run_eagerly(self):
if (
self.dynamic and self._run_eagerly is False
): # pylint:disable=g-bool-id-comparison
# TODO(fchollet): consider using py_func to enable this.
raise ValueError(
"Your model contains layers that can only be "
... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | run_eagerly | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | training.py | 12 | 20 | https://github.com/keras-team/keras.git | 8 | 68 | 0 | 80 | 127 | Python | {
"docstring": "Settable attribute indicating whether the model should run eagerly.\n\n Running eagerly means that your model will be run step by step,\n like Python code. Your model might run slower, but it should become easier\n for you to debug it by stepping into individual layer calls.\n\n ... | def run_eagerly(self):
if (
self.dynamic and self._run_eagerly is False
): # pylint:disable=g-bool-id-comparison
# TODO(fchollet): consider using py_func to enable this.
raise ValueError(
"Your model contains layers that can only be "
... | |
16,882 | 79,164 | 24 | wagtail/models/__init__.py | 10 | 5 | def get_preview_context(self, request, *args, **kwargs):
return {"object": self, "request": request}
| Add docs for PreviewableMixin | get_preview_context | e864b9c4d12ad0edd38283c17c2935e950e73520 | wagtail | __init__.py | 8 | 2 | https://github.com/wagtail/wagtail.git | 1 | 24 | 0 | 10 | 41 | Python | {
"docstring": "\n Returns a context dictionary for use in templates for previewing this object.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 11
} | def get_preview_context(self, request, *args, **kwargs):
return {"object": self, "request": request}
| |
99,145 | 300,279 | 228 | tests/components/mobile_app/test_sensor.py | 61 | 21 | async def test_default_disabling_entity(hass, create_registrations, webhook_client):
webhook_id = create_registrations[1]["webhook_id"]
webhook_url = f"/api/webhook/{webhook_id}"
reg_resp = await webhook_client.post(
w | Allow mobile app to disable entities by default (#71562) | test_default_disabling_entity | 539ce7ff0e9d9bc59cd8f028f245c09f802c89cb | core | test_sensor.py | 15 | 25 | https://github.com/home-assistant/core.git | 1 | 129 | 0 | 45 | 230 | Python | {
"docstring": "Test that sensors can be disabled by default upon registration.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | async def test_default_disabling_entity(hass, create_registrations, webhook_client):
webhook_id = create_registrations[1]["webhook_id"]
webhook_url = f"/api/webhook/{webhook_id}"
reg_resp = await webhook_client.post(
webhook_url,
json={
"type": "register_sensor",
... | |
36,490 | 155,918 | 1,545 | dask/dataframe/io/parquet/arrow.py | 379 | 51 | def _create_dd_meta(cls, dataset_info):
# Collect necessary information from dataset_info
schema = dataset_info["schema"]
index = dataset_info["index"]
categories = dataset_info["categories"]
partition_obj = dataset_info["partitions"]
partitions = dataset_info["... | Fix "physical" column bug in pyarrow-based read_parquet (#8775)
Starting with pyarrow-5.0, the `pyarrow.dataset` API can now be used to write parquet datasets. Using `pyarrow.dataset.write_dataset` to write partitioned data results in different "pandas metadata" than we get from a Dask-written dataset, because Dask wi... | _create_dd_meta | 73acebb3a2066792dea39c78245a6e1a01b2b173 | dask | arrow.py | 19 | 81 | https://github.com/dask/dask.git | 27 | 504 | 0 | 196 | 823 | Python | {
"docstring": "Use parquet schema and hive-partition information\n (stored in dataset_info) to construct DataFrame metadata.\n\n This method is used by both arrow engines.\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 21,
"vocab_size": 21
} | def _create_dd_meta(cls, dataset_info):
# Collect necessary information from dataset_info
schema = dataset_info["schema"]
index = dataset_info["index"]
categories = dataset_info["categories"]
partition_obj = dataset_info["partitions"]
partitions = dataset_info["... | |
9,392 | 48,185 | 243 | airflow/providers/google/cloud/transfers/postgres_to_gcs.py | 54 | 31 | def convert_type(self, value, schema_type, stringify_dict=True):
if isinstance(value, datetime.datetime):
iso_format_value = value.isoformat()
if value.tzinfo is None:
return iso_format_value
return pendulum.parse(iso_format_value).float_timestamp
... | Fix `PostgresToGCSOperator` does not allow nested JSON (#23063)
* Avoid double json.dumps for json data export in PostgresToGCSOperator.
* Fix CI | convert_type | 766726f2e3a282fcd2662f5dc6e9926dc38a6540 | airflow | postgres_to_gcs.py | 12 | 19 | https://github.com/apache/airflow.git | 8 | 149 | 0 | 35 | 231 | Python | {
"docstring": "\n Takes a value from Postgres, and converts it to a value that's safe for\n JSON/Google Cloud Storage/BigQuery.\n Timezone aware Datetime are converted to UTC seconds.\n Unaware Datetime, Date and Time are converted to ISO formatted strings.\n Decimals are converted... | def convert_type(self, value, schema_type, stringify_dict=True):
if isinstance(value, datetime.datetime):
iso_format_value = value.isoformat()
if value.tzinfo is None:
return iso_format_value
return pendulum.parse(iso_format_value).float_timestamp
... | |
81,820 | 276,990 | 49 | keras/utils/metrics_utils.py | 27 | 15 | def _filter_top_k(x, k):
_, top_k_idx = tf.math.top_k(x, k, sorted=False)
top_k_mask = tf.reduce_sum(
tf.one_ | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _filter_top_k | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | metrics_utils.py | 13 | 6 | https://github.com/keras-team/keras.git | 1 | 72 | 0 | 24 | 110 | Python | {
"docstring": "Filters top-k values in the last dim of x and set the rest to NEG_INF.\n\n Used for computing top-k prediction values in dense labels (which has the same\n shape as predictions) for recall and precision top-k metrics.\n\n Args:\n x: tensor with any dimensions.\n k: the number of val... | def _filter_top_k(x, k):
_, top_k_idx = tf.math.top_k(x, k, sorted=False)
top_k_mask = tf.reduce_sum(
tf.one_hot(top_k_idx, tf.shape(x)[-1], axis=-1), axis=-2
)
return x * top_k_mask + NEG_INF * (1 - top_k_mask)
| |
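In the `_filter_top_k` record, the `one_hot`/`reduce_sum` pair builds a 0/1 mask over the last axis. A standalone sketch, with `NEG_INF` defined locally as a stand-in for the module-level constant used in the original:

```python
import tensorflow as tf

NEG_INF = -1e10  # stand-in; the original uses a module constant

def filter_top_k(x, k):
    _, top_k_idx = tf.math.top_k(x, k, sorted=False)
    mask = tf.reduce_sum(
        tf.one_hot(top_k_idx, tf.shape(x)[-1], axis=-1), axis=-2
    )
    # Keep top-k entries, push everything else toward -infinity.
    return x * mask + NEG_INF * (1 - mask)

print(filter_top_k(tf.constant([[0.3, 0.9, 0.2, 0.5]]), k=2))
# [[-1e10, 0.9, -1e10, 0.5]]
```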
43,477 | 181,690 | 192 | tests/tpot_tests.py | 102 | 12 | def test_pick_two_individuals_eligible_for_crossover_bad():
ind1 = creator.Individual.from_string(
'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',
tpot_obj._pset
)
ind2 = creator.Individual.from_string(
'BernoulliNB(input_matrix, BernoulliNB__a... | Revert "Deployed 7ccda9a with MkDocs version: 1.3.0"
This reverts commit bd9629c40e01241766197119b581a99409b07068. | test_pick_two_individuals_eligible_for_crossover_bad | 388616b6247ca4ea8de4e2f340d6206aee523541 | tpot | tpot_tests.py | 9 | 19 | https://github.com/EpistasisLab/tpot.git | 4 | 104 | 0 | 48 | 171 | Python | {
"docstring": "Assert that pick_two_individuals_eligible_for_crossover() returns the right output when no pair is eligible",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def test_pick_two_individuals_eligible_for_crossover_bad():
ind1 = creator.Individual.from_string(
'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',
tpot_obj._pset
)
ind2 = creator.Individual.from_string(
'BernoulliNB(input_matrix, BernoulliNB__a... | |
52,808 | 209,826 | 77 | scapy/arch/windows/__init__.py | 26 | 9 | def get_ips(v6=False):
# type: (bool) -> Dict[NetworkInterface, List[ | [Hinty] Core typing: windows (#3684)
* Core typing: windows
Co-authored-by: Pierre <pierre@droids-corp.org> | get_ips | a2b7a28faff1db058dd22ce097a268e0ad5d1d33 | scapy | __init__.py | 13 | 8 | https://github.com/secdev/scapy.git | 3 | 53 | 0 | 22 | 86 | Python | {
"docstring": "Returns all available IPs matching to interfaces, using the windows system.\n Should only be used as a WinPcapy fallback.\n\n :param v6: IPv6 addresses\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 23,
"vocab_size": 23
} | def get_ips(v6=False):
# type: (bool) -> Dict[NetworkInterface, List[str]]
res = {}
for iface in six.itervalues(conf.ifaces):
if v6:
res[iface] = iface.ips[6]
else:
res[iface] = iface.ips[4]
return res
| |
6,898 | 38,013 | 31 | src/transformers/models/opt/modeling_opt.py | 16 | 9 | def _set_gradient_checkpointing(self, module, value=False):
| Add OPT (#17088)
* First version - OPT model
* Final changes
- putting use cache to False
* few changes
- remove commented block
* few changes
- remove unecessary files
* fix style issues
* few changes
- remove a test file
- added the logits test
* Update src/transformers/models/auto/tok... | _set_gradient_checkpointing | b971c769e80fe85fb7dd35c7cf65f3ac97ea6421 | transformers | modeling_opt.py | 9 | 3 | https://github.com/huggingface/transformers.git | 2 | 26 | 0 | 13 | 53 | Python | {
"docstring": "\n Generation example:\n\n ```python\n >>> from transformers import AutoTokenizer, AutoModelForCausalLM\n\n >>> model = OPTForCausalLM.from_pretrained(\"ArthurZ/opt-350m\")\n >>> tokenizer = GPT2Tokenizer.from_pretrained(\"patrickvonplaten/opt_gpt2_tokenizer\")\n\n >>> TEXTS_TO_GENER... | def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (OPTDecoder)):
module.gradient_checkpointing = value
OPT_GENERATION_EXAMPLE = r
OPT_INPUTS_DOCSTRING = r
| |
120,959 | 337,105 | 78 | examples/text_to_image/train_text_to_image.py | 29 | 7 | def to(self, device=None, dtype=None) -> None:
r
# .to() on the ten | [train_text2image] Fix EMA and make it compatible with deepspeed. (#813)
* fix ema
* style
* add comment about copy
* style
* quality | to | 008b608f1551dbcf521284ed0e7a6722cd02ef07 | diffusers | train_text_to_image.py | 11 | 10 | https://github.com/huggingface/diffusers.git | 3 | 56 | 0 | 28 | 85 | Python | {
"docstring": "Move internal buffers of the ExponentialMovingAverage to `device`.\n\n Args:\n device: like `device` argument to `torch.Tensor.to`\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 15,
"vocab_size": 14
} | def to(self, device=None, dtype=None) -> None:
r
# .to() on the tensors handles None correctly
self.shadow_params = [
p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
for p in self.shadow_params
]
| |
85,240 | 285,200 | 28 | openbb_terminal/econometrics/econometrics_model.py | 16 | 11 | def get_granger_causality(dependent_series, independent_series, lags):
granger_set = pd.concat([dependent_series, independent_series], axis=1)
granger = grangercausalitytests(granger_set, [lags], verbose=False)
return granger
| Here we merge all API Refactor related branches (#2236)
* Update api.py
* Updated forex menu
* refactor ycrv command
* refactor ycrv command black
* refactor ecocal command
* Minh changes
* Adding space to test pushing
* title fix ecocal df
* get economic calendar annotation
* fix investingc... | get_granger_causality | 9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b | OpenBBTerminal | econometrics_model.py | 9 | 4 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 1 | 42 | 0 | 14 | 63 | Python | {
"docstring": "Calculate granger tests\n\n Parameters\n ----------\n dependent_series: Series\n The series you want to test Granger Causality for.\n independent_series: Series\n The series that you want to test whether it Granger-causes time_series_y\n lags : int\n The amount of l... | def get_granger_causality(dependent_series, independent_series, lags):
granger_set = pd.concat([dependent_series, independent_series], axis=1)
granger = grangercausalitytests(granger_set, [lags], verbose=False)
return granger
| |
51,592 | 206,624 | 91 | django/utils/decorators.py | 47 | 4 | def _multi_decorate(decorators, method):
if hasattr(decorators, "__iter__") | Refs #33476 -- Reformatted code with Black. | _multi_decorate | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | decorators.py | 11 | 10 | https://github.com/django/django.git | 3 | 52 | 0 | 35 | 58 | Python | {
"docstring": "\n Decorate `method` with one or more function decorators. `decorators` can be\n a single decorator or an iterable of decorators.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 19,
"vocab_size": 17
} | def _multi_decorate(decorators, method):
if hasattr(decorators, "__iter__"):
# Apply a list/tuple of decorators if 'decorators' is one. Decorator
# functions are applied so that the call order is the same as the
# order in which they appear in the iterable.
decorators = decorato... | |
4,536 | 23,192 | 716 | ppocr/data/imaug/fce_targets.py | 191 | 49 | def generate_level_targets(self, img_size, text_polys, ignore_polys):
h, w = img_size
lv_size_divs = self.level_size_divisors
lv_proportion_range = self.level_proportion_range
lv_text_polys = [[] for i in range(len(lv_size_divs))]
lv_ignore_polys = [[] for i in | add fcenet | generate_level_targets | 9f62b610dea6161627200ed85d92e19b1923279a | PaddleOCR | fce_targets.py | 15 | 39 | https://github.com/PaddlePaddle/PaddleOCR.git | 10 | 384 | 0 | 96 | 586 | Python | {
"docstring": "Generate ground truth target on each level.\n\n Args:\n img_size (list[int]): Shape of input image.\n text_polys (list[list[ndarray]]): A list of ground truth polygons.\n ignore_polys (list[list[ndarray]]): A list of ignored polygons.\n Returns:\n ... | def generate_level_targets(self, img_size, text_polys, ignore_polys):
h, w = img_size
lv_size_divs = self.level_size_divisors
lv_proportion_range = self.level_proportion_range
lv_text_polys = [[] for i in range(len(lv_size_divs))]
lv_ignore_polys = [[] for i in range(len... | |
14,684 | 67,961 | 12 | erpnext/stock/stock_balance.py | 18 | 8 | def get_reserved_qty(item_code, warehouse):
reserved_qty = frappe.db.sql(
,
(item_code, warehouse, item_code, warehouse),
)
return flt( | style: format code with black | get_reserved_qty | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | stock_balance.py | 10 | 46 | https://github.com/frappe/erpnext.git | 2 | 43 | 0 | 17 | 62 | Python | {
"docstring": "\n\t\tselect\n\t\t\tsum(dnpi_qty * ((so_item_qty - so_item_delivered_qty) / so_item_qty))\n\t\tfrom\n\t\t\t(\n\t\t\t\t(select\n\t\t\t\t\tqty as dnpi_qty,\n\t\t\t\t\t(\n\t\t\t\t\t\tselect qty from `tabSales Order Item`\n\t\t\t\t\t\twhere name = dnpi.parent_detail_docname\n\t\t\t\t\t\tand (delivered_by_... | def get_reserved_qty(item_code, warehouse):
reserved_qty = frappe.db.sql(
,
(item_code, warehouse, item_code, warehouse),
)
return flt(reserved_qty[0][0]) if reserved_qty else 0
| |
23,229 | 108,518 | 21 | lib/matplotlib/pyplot.py | 15 | 2 | def cool():
set_cmap('cool | Cleanup documentation generation for pyplot
- remove the awkward `pyplot.plotting()` function, which only served
as a namespace to take up the docs for pyplot and output them via
`.. autofunction`
- Instead generate the same information using `.. autosummary::`. We
have to list the desired methods here explicitl... | cool | 032316bc6c7798fca6c82de24167c975f237687f | matplotlib | pyplot.py | 8 | 2 | https://github.com/matplotlib/matplotlib.git | 1 | 9 | 0 | 15 | 22 | Python | {
"docstring": "\n Set the colormap to 'cool'.\n\n This changes the default colormap as well as the colormap of the current\n image if there is one. See ``help(colormaps)`` for more information.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 28,
"vocab_size": 22
} | def cool():
set_cmap('cool')
# Autogenerated by boilerplate.py. Do not edit as changes will be lost. | |
52,627 | 209,158 | 32 | scapy/packet.py | 11 | 3 | def add_parent(self, parent):
| Add parent field to Packet (#3607)
Co-authored-by: Sergey Matsievskiy <matsievskiy@fastwel.ru> | add_parent | 6d7184e8bec5102dfa66bcc10432a30a7e0dcf3a | scapy | packet.py | 7 | 2 | https://github.com/secdev/scapy.git | 1 | 13 | 0 | 11 | 24 | Python | {
"docstring": "Set packet parent.\n When packet is an element in PacketListField, parent field would\n point to the list owner packet.",
"language": "en",
"n_whitespaces": 32,
"n_words": 19,
"vocab_size": 18
} | def add_parent(self, parent):
# type: (Packet) -> None
self.parent = parent
| |
52,377 | 208,534 | 35 | IPython/testing/tools.py | 9 | 7 | def make_tempfile(name):
open(name, 'w', encoding='utf-8 | Fix EncodingWarning on Python 3.10 | make_tempfile | 23276ac4770f380ce1d5808950dd412a35594af1 | ipython | tools.py | 11 | 6 | https://github.com/ipython/ipython.git | 2 | 31 | 0 | 9 | 59 | Python | {
"docstring": " Create an empty, named, temporary file for the duration of the context.\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 12,
"vocab_size": 11
} | def make_tempfile(name):
open(name, 'w', encoding='utf-8').close()
try:
yield
finally:
os.unlink(name)
| |
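One observation on the `make_tempfile` record: the bare `yield` inside `try/finally` only makes sense as a generator-based context manager, so the full source presumably carries a `contextlib.contextmanager` decorator that this cut does not show. A self-contained sketch under that assumption:

```python
import os
from contextlib import contextmanager

@contextmanager
def make_tempfile(name):
    # An empty named file that exists only for the duration of the block.
    open(name, "w", encoding="utf-8").close()
    try:
        yield
    finally:
        os.unlink(name)

with make_tempfile("scratch.txt"):
    assert os.path.exists("scratch.txt")
```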
3,761 | 21,319 | 342 | pipenv/patched/notpip/_vendor/cachecontrol/controller.py | 120 | 21 | def update_cached_response(self, request, response):
cache_url = self.cache_url(request.url)
cached_response = self.serializer.loads(request, self.cache.get(cache_url))
if not cached_response:
# we didn't have a cached response
return response
# Lets u... | Vendor in pip 22.1.2 | update_cached_response | c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | pipenv | controller.py | 13 | 16 | https://github.com/pypa/pipenv.git | 4 | 103 | 0 | 79 | 172 | Python | {
"docstring": "On a 304 we will get a new set of headers that we want to\n update our cached value with, assuming we have one.\n\n This should only ever be called when we've sent an ETag and\n gotten a 304 as the response.\n ",
"language": "en",
"n_whitespaces": 70,
"n_words": 42,
... | def update_cached_response(self, request, response):
cache_url = self.cache_url(request.url)
cached_response = self.serializer.loads(request, self.cache.get(cache_url))
if not cached_response:
# we didn't have a cached response
return response
# Lets u... | |
1,541 | 9,099 | 32 | parsing/dml_csr/loss/lovasz_softmax.py | 21 | 9 | def binary_xloss(logits, labels, ignore=None):
l | Create lovasz_softmax.py | binary_xloss | db307ffb12d6ba1f8eaeeafd29ee6d4a3fd6fa97 | insightface | lovasz_softmax.py | 12 | 4 | https://github.com/deepinsight/insightface.git | 1 | 43 | 0 | 17 | 69 | Python | {
"docstring": "\n Binary Cross entropy loss\n logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\n ignore: void class id\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 33,
"vocab_size": 3... | def binary_xloss(logits, labels, ignore=None):
logits, labels = flatten_binary_scores(logits, labels, ignore)
loss = StableBCELoss()(logits, Variable(labels.float()))
return loss
# --------------------------- MULTICLASS LOSSES ---------------------------
| |
84,464 | 283,198 | 137 | build/pyinstaller/user_agent/base.py | 38 | 7 | def generate_navigator_js(os=None, navigator=None, platform=None, device_type=None):
config = generate_navigator(
os=os, navigator=navigator, platform=platform, device_type=device_type
)
return {
"appCodeName": config["app_code_name"],
"appName": config["app_name"],
"ap... | Create a packaged app bundle with Pyinstaller (#1525)
* Add dashboard widget assets
* Add ipywidgets and ipyflex to project
* Add currencies dashboard notebook
* Update docs and docstrings
* Add pyinstaller to project deps
* Add pyinstaller artifacts to gitignore
* Fix linter errors in terminal.py
... | generate_navigator_js | ab4de1dd70fba866930150e440a03e461a6ca6a8 | OpenBBTerminal | base.py | 9 | 17 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 1 | 120 | 0 | 38 | 207 | Python | {
"docstring": "\n Generates web navigator's config with keys corresponding\n to keys of `windows.navigator` JavaScript object.\n\n :param os: limit list of oses for generation\n :type os: string or list/tuple or None\n :param navigator: limit list of browser engines for generation\n :type navigator... | def generate_navigator_js(os=None, navigator=None, platform=None, device_type=None):
config = generate_navigator(
os=os, navigator=navigator, platform=platform, device_type=device_type
)
return {
"appCodeName": config["app_code_name"],
"appName": config["app_name"],
"ap... | |
9,893 | 49,719 | 212 | modules/image/text_to_image/disco_diffusion_cnclip_vitb16/cn_clip/clip/bert_tokenizer.py | 79 | 13 | def printable_text(text):
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
| add disco_diffusion_cnclip_vitb16 module | printable_text | f4d6e64cdc132ae868699a0ba442f4ab1d304a14 | PaddleHub | bert_tokenizer.py | 17 | 17 | https://github.com/PaddlePaddle/PaddleHub.git | 7 | 103 | 0 | 50 | 179 | Python | {
"docstring": "Returns text encoded in a way suitable for print or `tf.logging`.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def printable_text(text):
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("... | |
34,797 | 150,599 | 95 | freqtrade/freqai/prediction_models/RL/RLPrediction_env_v2.py | 31 | 10 | def is_tradesignal(self, action):
# trade signal
return not ((action == Actions.Neutral.value and self._position == Positions.Neutral)
| callback function and TDQN model added | is_tradesignal | 01232e9a1f8e28e3611e38af3816edb026600767 | freqtrade | RLPrediction_env_v2.py | 14 | 4 | https://github.com/freqtrade/freqtrade.git | 6 | 65 | 0 | 20 | 102 | Python | {
"docstring": "\n not trade signal is :\n Action: Neutral, position: Neutral -> Nothing \n Action: Long, position: Long -> Hold Long\n Action: Short, position: Short -> Hold Short\n ",
"language": "en",
"n_whitespaces": 62,
"n_words": 25,
"vocab_size": 16
} | def is_tradesignal(self, action):
# trade signal
return not ((action == Actions.Neutral.value and self._position == Positions.Neutral)
or (action == Actions.Short.value and self._position == Positions.Short)
or (action == Actions.Long.value and self._pos... | |
54,986 | 217,880 | 101 | python3.10.4/Lib/http/server.py | 13 | 9 | def log_message(self, format, *args):
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
| add python 3.10.4 for windows | log_message | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | server.py | 11 | 5 | https://github.com/XX-net/XX-Net.git | 1 | 37 | 0 | 12 | 62 | Python | {
"docstring": "Log an arbitrary message.\n\n This is used by all other logging functions. Override\n it if you have specific logging wishes.\n\n The first argument, FORMAT, is a format string for the\n message to be logged. If the format string contains\n any % escapes requiring ... | def log_message(self, format, *args):
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
| |
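The `log_message` record shows the default stderr logging of `http.server.BaseHTTPRequestHandler`; its docstring names it as the intended override point. A minimal sketch that silences per-request output:

```python
import http.server

class QuietHandler(http.server.SimpleHTTPRequestHandler):
    def log_message(self, format, *args):
        # Drop the default 'host - - [date] request' stderr lines.
        pass
```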
120,698 | 335,005 | 34 | src/diffusers/utils/logging.py | 16 | 10 | def warning_advice(self, *args, **kwargs):
no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False)
if no_advisory_warnings:
return
self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
| changes comments and env vars in `utils/logging`
removes mentions of 🤗Transformers with 🤗Diffusers equivalent. | warning_advice | c3cc8eb23c8095217388d350409b454ea396c12b | diffusers | logging.py | 9 | 5 | https://github.com/huggingface/diffusers.git | 2 | 36 | 0 | 15 | 72 | Python | {
"docstring": "\n This method is identical to `logger.warninging()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this\n warning will not be printed\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 19,
"vocab_size": 18
} | def warning_advice(self, *args, **kwargs):
no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False)
if no_advisory_warnings:
return
self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
| |
47,895 | 196,395 | 41 | sympy/matrices/repmatrix.py | 16 | 8 | def zip_row_op(self, i, k, f):
for j in range(self.cols):
self[i, j] = f | Moved imports to higher level | zip_row_op | 59d22b6bb7287613d598611027f640d068ca5748 | sympy | repmatrix.py | 11 | 3 | https://github.com/sympy/sympy.git | 2 | 45 | 0 | 16 | 64 | Python | {
"docstring": "In-place operation on row ``i`` using two-arg functor whose args are\n interpreted as ``(self[i, j], self[k, j])``.\n\n Examples\n ========\n\n >>> from sympy import eye\n >>> M = eye(3)\n >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M\n Matrix([\n ... | def zip_row_op(self, i, k, f):
for j in range(self.cols):
self[i, j] = f(self[i, j], self[k, j])
| |
49,356 | 199,700 | 18 | sympy/polys/orthopolys.py | 13 | 7 | def legendre_poly(n, x=None, polys=False):
r
return named_poly(n, dup_legendre, QQ, "Legendre polynomial", (x,), polys)
| Run orthopolys and appellseqs through a common interface
Including unifying the two Chebyshev generators into one function.
There are also two kinds of Hermite polynomials, and they too share the
same recurrence, but the second type He_n(x) (aka the probabilist,
reduced or small polynomials) will not be added here. | legendre_poly | d1d46df73ebaad94089847558d00a8b7269f554d | sympy | orthopolys.py | 8 | 13 | https://github.com/sympy/sympy.git | 1 | 33 | 0 | 13 | 47 | Python | {
"docstring": "Generates the Legendre polynomial `P_n(x)`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ",
"language": "en",
"n_whitespaces": 63,
... | def legendre_poly(n, x=None, polys=False):
r
return named_poly(n, dup_legendre, QQ, "Legendre polynomial", (x,), polys)
| |
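The `legendre_poly` record routes through the shared `named_poly` interface mentioned in its commit message; the public behaviour is straightforward to check:

```python
from sympy import legendre_poly, symbols

x = symbols("x")
print(legendre_poly(3, x))              # 5*x**3/2 - 3*x/2
print(legendre_poly(3, x, polys=True))  # the same cubic wrapped as a Poly over QQ
```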
118,084 | 322,189 | 495 | paddlenlp/taskflow/knowledge_mining.py | 91 | 19 | def _concat_short_text_reuslts(self, input_texts, results):
long_text_lens = [len(text) for text in input_texts]
concat_results = []
single_results = {}
count = 0
for text in input_texts:
text_len = len(text)
while True:
if len(sin... | Update neural search readme and Add Paddle Serving Support (#1558)
* add recall inference similarity
* update examples
* updatea readme
* update dir name
* update neural search readme
* update milvus readme
* update domain adaptive pretraining readme
* fix the mistakes
* update readme
* add ... | _concat_short_text_reuslts | 621357338437ee420eabbbf5ab19065bc85e73a5 | PaddleNLP | knowledge_mining.py | 18 | 28 | https://github.com/PaddlePaddle/PaddleNLP.git | 9 | 172 | 0 | 59 | 289 | Python | {
"docstring": "\n Concat the model output of short texts to the total result of long text.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 12
} | def _concat_short_text_reuslts(self, input_texts, results):
long_text_lens = [len(text) for text in input_texts]
concat_results = []
single_results = {}
count = 0
for text in input_texts:
text_len = len(text)
while True:
if len(sin... | |
53,046 | 211,216 | 541 | ppdet/modeling/heads/s2anet_head.py | 191 | 43 | def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.e... | refactor s2anet (#6604)
* refactor s2anet to support batch_size > 1
* fix problem of inference
* support batch_size > 1 for training
* fix empty results
* fix dota eval
* fix configs of s2anet_head
* modify s2anet_spine_1x to 73 mAP | get_pred | b4727677751081b257c6fa23c3c124ab9e5a32a1 | PaddleDetection | s2anet_head.py | 13 | 36 | https://github.com/PaddlePaddle/PaddleDetection.git | 2 | 466 | 0 | 103 | 682 | Python | {
"docstring": "\n Rescale, clip and filter the bbox from the output of NMS to\n get final prediction.\n Args:\n bboxes(Tensor): bboxes [N, 10]\n bbox_num(Tensor): bbox_num\n im_shape(Tensor): [1 2]\n scale_factor(Tensor): [1 2]\n Returns:\n ... | def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.e... | |
13,733 | 64,834 | 46 | erpnext/accounts/doctype/chart_of_accounts_importer/chart_of_accounts_importer.py | 65 | 13 | def unset_existing_data(company):
linked = frappe.db.sql(
,
as_dict=True,
)
# remove accounts data from company
update_values = {d.fieldname: "" for d in linked}
frappe.db.set_value("Company", company, update_values, update_values)
# remove accounts data from various doctypes
for doctype in [
"Account",
... | style: format code with black | unset_existing_data | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | chart_of_accounts_importer.py | 13 | 19 | https://github.com/frappe/erpnext.git | 3 | 82 | 0 | 48 | 140 | Python | {
"docstring": "select fieldname from tabDocField\n\t\twhere fieldtype=\"Link\" and options=\"Account\" and parent=\"Company\"delete from `tab{0}` where `company`=\"%s\"",
"language": "en",
"n_whitespaces": 12,
"n_words": 14,
"vocab_size": 11
} | def unset_existing_data(company):
linked = frappe.db.sql(
,
as_dict=True,
)
# remove accounts data from company
update_values = {d.fieldname: "" for d in linked}
frappe.db.set_value("Company", company, update_values, update_values)
# remove accounts data from various doctypes
for doctype in [
"Account",
... | |
70,390 | 244,465 | 55 | mmdet/models/dense_heads/base_dense_head.py | 16 | 9 | def simple_test(self, feats, batch_img_metas, rescale=False):
outs = self.forward(feats)
results_list = self.get_results(
*outs, batch_img_metas=batch_img_metas, rescale=rescale)
return results_list
| Modify RetinaNet model interface | simple_test | 924c381a78eb70cede198e042ef34e038e05c15a | mmdetection | base_dense_head.py | 9 | 5 | https://github.com/open-mmlab/mmdetection.git | 1 | 41 | 0 | 14 | 63 | Python | {
"docstring": "Test function without test-time augmentation.\n\n Args:\n feats (tuple[torch.Tensor]): Multi-level features from the\n upstream network, each is a 4D-tensor.\n batch_img_metas (list[dict]): List of image information.\n rescale (bool, optional): Wh... | def simple_test(self, feats, batch_img_metas, rescale=False):
outs = self.forward(feats)
results_list = self.get_results(
*outs, batch_img_metas=batch_img_metas, rescale=rescale)
return results_list
| |
5,678 | 31,086 | 167 | src/transformers/commands/pt_to_tf.py | 59 | 13 | def compare_pt_tf_models(pt_model, pt_input, tf_model, tf_input):
pt_outputs = pt_model(**pt_input, output_hidden_states=True)
tf_outputs = tf_model(**tf_input, output_hidden_states=True)
# 1. All output attributes must be the same
pt_out_attrs = set(pt_outputs.keys())
| CLI: add stricter automatic checks to `pt-to-tf` (#17588)
* Stricter pt-to-tf checks; Update docker image for related tests
* check all attributes in the output
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> | compare_pt_tf_models | 78c695eb624bc863ea165b6fb0a8850bfd9fcefa | transformers | pt_to_tf.py | 12 | 12 | https://github.com/huggingface/transformers.git | 2 | 76 | 0 | 48 | 119 | Python | {
"docstring": "\n Compares the TensorFlow and PyTorch models, given their inputs, returning a tuple with the maximum observed\n difference and its source.\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 20,
"vocab_size": 18
} | def compare_pt_tf_models(pt_model, pt_input, tf_model, tf_input):
pt_outputs = pt_model(**pt_input, output_hidden_states=True)
tf_outputs = tf_model(**tf_input, output_hidden_states=True)
# 1. All output attributes must be the same
pt_out_attrs = set(pt_outputs.keys())
... | |
@pytest.fixture(
params=_get_all_parser_float_precision_combinations()["params"],
ids=_get_all_parser_float_precision_combinations()["ids"],
) | 39,627 | 165,035 | 174 | pandas/tests/io/parser/conftest.py | 64 | 20 | def _get_all_parser_float_precision_combinations():
params = []
ids = []
for parser, parser_id in zip(_all_parsers, _all_parser_ids):
if hasattr(parser, "values"):
# Wrapped in pytest.param, get the actual parser back
parser = parser.values[0]
for precision in pa... | CI: Add single_cpu build (#45995) | _get_all_parser_float_precision_combinations | 08104e8c0a80579dfe3e984b32b35ddc94aafa01 | pandas | conftest.py | 15 | 12 | https://github.com/pandas-dev/pandas.git | 5 | 105 | 1 | 51 | 223 | Python | {
"docstring": "\n Return all allowable parser and float precision\n combinations and corresponding ids.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 11,
"vocab_size": 10
} | def _get_all_parser_float_precision_combinations():
params = []
ids = []
for parser, parser_id in zip(_all_parsers, _all_parser_ids):
if hasattr(parser, "values"):
# Wrapped in pytest.param, get the actual parser back
parser = parser.values[0]
for precision in pa... |
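Editor's note: a minimal sketch of the pattern this fixture implements, building a pytest `params`/`ids` pair from the cross product of parser engines and float-precision modes. The parser ids and precision lists below are illustrative stand-ins, not pandas' actual values.

import pytest

_parsers = ["c_high", "c_low", "python"]            # stand-in parser ids
_precisions = {"c_high": ["high", "round_trip"],
               "c_low": ["legacy"],
               "python": [None]}

def all_parser_float_precision_combinations():
    params, ids = [], []
    for parser_id in _parsers:
        for precision in _precisions[parser_id]:
            params.append((parser_id, precision))
            ids.append(f"{parser_id}-{precision}")
    return {"params": params, "ids": ids}

_combos = all_parser_float_precision_combinations()

@pytest.mark.parametrize("parser,precision", _combos["params"], ids=_combos["ids"])
def test_each_combination(parser, precision):
    assert parser in _parsers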
49,846 | 201,071 | 86 | tests/app_loading/tests.py | 17 | 12 | def test_egg3(self):
egg_name = "%s/omelet.egg" % self.egg_dir
with extend_sys_path(egg_name):
| Refs #33476 -- Reformatted code with Black. | test_egg3 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 14 | 7 | https://github.com/django/django.git | 1 | 54 | 0 | 15 | 101 | Python | {
"docstring": "Models module can be loaded from an app located under an egg's top-level package",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 13
} | def test_egg3(self):
egg_name = "%s/omelet.egg" % self.egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=["omelet.app_with_models"]):
models_module = apps.get_app_config("app_with_models").models_module
self.assertIsNotNone(mod... | |
@pytest.mark.parametrize("setting", ["Enabled", "Disabled"]) | 54,437 | 216,155 | 29 | tests/pytests/functional/modules/win_lgpo/test_audit_settings_module.py | 18 | 11 | def test_auditing_case_names(lgpo, setting_name, setting, enable_legacy_auditing):
lgpo.set_computer_policy(setting_name, setting)
result = lgpo.get_policy(setting_name, "machine")
assert result == setting
@pytest.mark.parametrize("setting", ["Enabled", | Add and update tests | test_auditing_case_names | 0e69e2317dfa06f114c6dd63bc70c68fc81d19b1 | salt | test_audit_settings_module.py | 9 | 4 | https://github.com/saltstack/salt.git | 1 | 34 | 1 | 17 | 82 | Python | {
"docstring": "\n Helper function to set an audit setting and assert that it was successful\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 13,
"vocab_size": 13
} | def test_auditing_case_names(lgpo, setting_name, setting, enable_legacy_auditing):
lgpo.set_computer_policy(setting_name, setting)
result = lgpo.get_policy(setting_name, "machine")
assert result == setting
@pytest.mark.parametrize("setting", ["Enabled", "Disabled"]) |
16,397 | 75,348 | 138 | wagtail/images/tests/tests.py | 31 | 15 | def test_get(self):
# Generate signature
signature = generate_signature(self.image.id, "fill-800x600")
# Get the image
response = self.client.get(
reverse(
"wagtailimages_serve", args=(signature, self.image.id, "fill-800x600")
)
)... | Reformat with black | test_get | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | tests.py | 14 | 10 | https://github.com/wagtail/wagtail.git | 1 | 74 | 0 | 24 | 127 | Python | {
"docstring": "\n Test a valid GET request to the view\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 8
} | def test_get(self):
# Generate signature
signature = generate_signature(self.image.id, "fill-800x600")
# Get the image
response = self.client.get(
reverse(
"wagtailimages_serve", args=(signature, self.image.id, "fill-800x600")
)
)... | |
341 | 2,705 | 149 | packages/syft/src/syft/lib/python/slice.py | 34 | 15 | def _object2proto(self) -> Slice_PB:
slice_pb | change[syft.lib.python] syft import absolute -> relative | _object2proto | e5bfcd8016813b1d253a72da5c5071b0e0965644 | PySyft | slice.py | 11 | 19 | https://github.com/OpenMined/PySyft.git | 4 | 81 | 0 | 23 | 133 | Python | {
"docstring": "\n Serialize the Slice object instance returning a protobuf.\n\n Returns:\n Slice_PB: returns a protobuf object class representing this Slice object.\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 19,
"vocab_size": 16
} | def _object2proto(self) -> Slice_PB:
slice_pb = Slice_PB()
if self.start:
slice_pb.start = self.start
slice_pb.has_start = True
if self.stop:
slice_pb.stop = self.stop
slice_pb.has_stop = True
if self.step:
slice_pb.s... | |
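Editor's note: the serializer above is cut off at the `step` field. Below is a sketch of the same presence-flag pattern, with a plain dataclass standing in for the generated `Slice_PB` message (a hypothetical stand-in, not PySyft's protobuf): explicit `has_*` booleans let the receiver tell an unset field from a legitimate value of 0, which proto3 scalar fields cannot express on their own.

from dataclasses import dataclass

@dataclass
class SlicePB:  # hypothetical stand-in for the generated Slice_PB message
    start: int = 0
    stop: int = 0
    step: int = 0
    has_start: bool = False
    has_stop: bool = False
    has_step: bool = False

def slice_to_proto(s: slice) -> SlicePB:
    pb = SlicePB()
    if s.start is not None:  # `is not None` (a variation on the truthiness test above) keeps start=0
        pb.start, pb.has_start = s.start, True
    if s.stop is not None:
        pb.stop, pb.has_stop = s.stop, True
    if s.step is not None:
        pb.step, pb.has_step = s.step, True
    return pb

print(slice_to_proto(slice(0, 10, 2)))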
21,008 | 101,599 | 158 | plugins/extract/recognition/vgg_face2_keras.py | 36 | 13 | def __call__(self) -> List[Tuple[int, int]]:
logger.info("Sorting face distances. Depending on your dataset this may take some time...")
if self._threshold:
self._threshold = self._result_linkage[:, 2].max() * self._threshold
result_order = self._seriation(self._result_linka... | Overhaul sort:
- Standardize image data reading and writing
- Optimize loading (just one pass required)
- Make all sort groups binnable (to greater or lesser results)
- Add sort by pitch
- Deprecate multiple options
- linting, docs + locales | __call__ | 98d01760e469fd2108eed8d0b0a1ba6297c3177c | faceswap | vgg_face2_keras.py | 13 | 19 | https://github.com/deepfakes/faceswap.git | 2 | 73 | 0 | 32 | 115 | Python | {
"docstring": " Process the linkages.\n\n Transforms a distance matrix into a sorted distance matrix according to the order implied\n by the hierarchical tree (dendrogram).\n\n Returns\n -------\n list:\n List of indices with the order implied by the hierarchical tree or... | def __call__(self) -> List[Tuple[int, int]]:
logger.info("Sorting face distances. Depending on your dataset this may take some time...")
if self._threshold:
self._threshold = self._result_linkage[:, 2].max() * self._threshold
result_order = self._seriation(self._result_linka... | |
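Editor's note: the seriation step above is truncated. A rough equivalent using SciPy, ordering samples by the leaf order of a hierarchical-clustering dendrogram built from pairwise distances; this mirrors the docstring's "order implied by the hierarchical tree," not faceswap's exact code.

import numpy as np
from scipy.cluster.hierarchy import linkage, leaves_list
from scipy.spatial.distance import pdist

embeddings = np.random.RandomState(0).rand(6, 4)   # stand-in face embeddings
condensed = pdist(embeddings, metric="cosine")     # condensed pairwise distances
tree = linkage(condensed, method="average")        # hierarchical tree (linkage matrix)
order = leaves_list(tree)                          # seriation: dendrogram leaf order
print(order)                                       # e.g. an index permutation like [2 5 0 3 1 4]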
53,084 | 211,402 | 1,144 | ppdet/modeling/post_process.py | 292 | 58 | def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
if self.export_eb:
# enable rcnn models for edgeboard hw to skip the following postprocess.
return bboxes, bboxes, bbox_num
if not self.export_onnx:
bboxes_list = []
bbox_num_list = []... | add flag skipping postprocess to support edgeboard hardware (#6719)
* add flag skipping postprocess to support edgeboard hardware
* add flag skipping postprocess to support edgeboard hardware
* add flag skipping postprocess to support edgeboard hardware
* add comment for the flag export_eb | get_pred | b41194eaed10a01409451e4d3ea7f8b4812cdd23 | PaddleDetection | post_process.py | 17 | 62 | https://github.com/PaddlePaddle/PaddleDetection.git | 7 | 651 | 0 | 171 | 961 | Python | {
"docstring": "\n Rescale, clip and filter the bbox from the output of NMS to \n get final prediction. \n\n Notes:\n Currently only support bs = 1.\n\n Args:\n bboxes (Tensor): The output bboxes with shape [N, 6] after decode\n and NMS, including labels, s... | def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
if self.export_eb:
# enable rcnn models for edgeboard hw to skip the following postprocess.
return bboxes, bboxes, bbox_num
if not self.export_onnx:
bboxes_list = []
bbox_num_list = []... | |
99,894 | 301,046 | 18 | homeassistant/components/nexia/entity.py | 4 | 7 | def _signal_zone_update(self):
async_dispatcher_send(self.hass, f"{SIGNAL_ZONE_UPD | Update nexia to use asyncio (#72108) | _signal_zone_update | d8a580a90f8bf3206b31619493f4e653fceb3f4b | core | entity.py | 11 | 2 | https://github.com/home-assistant/core.git | 1 | 15 | 0 | 4 | 41 | Python | {
"docstring": "Signal a zone update.\n\n Whenever the underlying library does an action against\n a zone, the data for the zone is updated.\n\n Update a single zone.\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 25,
"vocab_size": 20
} | def _signal_zone_update(self):
async_dispatcher_send(self.hass, f"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}")
| |
@pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])]) | 75,518 | 259,003 | 287 | sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py | 133 | 39 | def test_asymmetric_error(quantile):
n_samples = 10_000
rng = np.random.RandomState(42)
# take care that X @ coef + intercept > 0
X = np.concatenate(
(
np.abs(rng.randn(n_samples)[:, None]),
-rng.randint(2, size=(n_samples, 1)),
),
a | FEA add quantile HGBT (#21800) | test_asymmetric_error | 5ad3421a5b5759ecfaaab93406592d988f5d487f | scikit-learn | test_gradient_boosting.py | 14 | 27 | https://github.com/scikit-learn/scikit-learn.git | 1 | 209 | 1 | 96 | 361 | Python | {
"docstring": "Test quantile regression for asymmetric distributed targets.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | def test_asymmetric_error(quantile):
n_samples = 10_000
rng = np.random.RandomState(42)
# take care that X @ coef + intercept > 0
X = np.concatenate(
(
np.abs(rng.randn(n_samples)[:, None]),
-rng.randint(2, size=(n_samples, 1)),
),
axis=1,
)
i... |
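Editor's note: a worked example of the asymmetry this test exercises. The pinball (quantile) loss that quantile regression minimizes penalizes under- and over-prediction differently depending on the target quantile.

import numpy as np

def pinball_loss(y_true, y_pred, quantile):
    diff = y_true - y_pred
    return np.mean(np.maximum(quantile * diff, (quantile - 1) * diff))

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.full(3, 1.5)
print(pinball_loss(y_true, y_pred, 0.8))  # ~0.567: under-prediction is expensive at q=0.8
print(pinball_loss(y_true, y_pred, 0.2))  # ~0.267: the same errors cost less at q=0.2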
88,424 | 289,281 | 1,402 | homeassistant/components/gtfs/sensor.py | 259 | 46 | def update(self) -> None:
with self.lock:
# Fetch valid stop information once
if not self._origin:
stops = self._pygtfs.stops_by_id(self.origin)
if not stops:
self._available = False
_LOGGER.warning("Origin ... | Move attribution to standalone attribute [e-g] (#80513) | update | c717fd19de01fc822d146cc5e353959dfa86d5f7 | core | sensor.py | 17 | 72 | https://github.com/home-assistant/core.git | 18 | 420 | 0 | 141 | 727 | Python | {
"docstring": "Get the latest data from GTFS and update the states.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def update(self) -> None:
with self.lock:
# Fetch valid stop information once
if not self._origin:
stops = self._pygtfs.stops_by_id(self.origin)
if not stops:
self._available = False
_LOGGER.warning("Origin ... | |
95,240 | 296,245 | 102 | tests/components/homekit_controller/test_binary_sensor.py | 41 | 16 | async def test_carbon_monoxide_sensor_read_state(hass, utcnow):
helper = await setup_test_component(hass, create_carbon_monoxide_sensor_service)
await helper.async_update(
ServicesTypes.CARBON_MONOXIDE_SENSOR,
{CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 0},
)
state = await help... | Fix HomeKit Controller device class for CO Sensors (#69949) | test_carbon_monoxide_sensor_read_state | ad5d7a845b73b6ef09b111597d6c542be4781b07 | core | test_binary_sensor.py | 11 | 15 | https://github.com/home-assistant/core.git | 1 | 92 | 0 | 24 | 152 | Python | {
"docstring": "Test that we can read the state of a HomeKit contact accessory.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | async def test_carbon_monoxide_sensor_read_state(hass, utcnow):
helper = await setup_test_component(hass, create_carbon_monoxide_sensor_service)
await helper.async_update(
ServicesTypes.CARBON_MONOXIDE_SENSOR,
{CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 0},
)
state = await help... | |
@dataclass | 1,224 | 7,512 | 308 | ludwig/utils/triton_utils.py | 52 | 33 | def save_config(self) -> TritonArtifact:
device = self.device
if self.inference_stage != PREDICTOR:
device = "cpu"
self.config = TritonConfig(
self.full_model_name,
self.input_features,
self.output_features,
self.max_batch_size... | Triton ensemble export (#2251) | save_config | ed8d9cf20843744f18593b22fb6a30eaf5f325eb | ludwig | triton_utils.py | 13 | 31 | https://github.com/ludwig-ai/ludwig.git | 2 | 144 | 1 | 44 | 231 | Python | {
"docstring": "Save the Triton config.\n\n Return the appropriate artifact.\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 8,
"vocab_size": 7
} | def save_config(self) -> TritonArtifact:
device = self.device
if self.inference_stage != PREDICTOR:
device = "cpu"
self.config = TritonConfig(
self.full_model_name,
self.input_features,
self.output_features,
self.max_batch_size... |
72,740 | 249,236 | 258 | tests/rest/admin/test_device.py | 50 | 17 | def test_unknown_device(self) -> None:
url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote(
self.other_user
)
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.a... | Use literals in place of `HTTPStatus` constants in tests (#13479)
Replace
- `HTTPStatus.NOT_FOUND`
- `HTTPStatus.FORBIDDEN`
- `HTTPStatus.UNAUTHORIZED`
- `HTTPStatus.CONFLICT`
- `HTTPStatus.CREATED`
Signed-off-by: Dirk Klimpel <dirk@klimpel.org> | test_unknown_device | 1595052b2681fb86c1c1b9a6028c1bc0d38a2e4b | synapse | test_device.py | 10 | 26 | https://github.com/matrix-org/synapse.git | 1 | 136 | 0 | 31 | 212 | Python | {
"docstring": "\n Tests that a lookup for a device that does not exist returns either 404 or 200.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 16,
"vocab_size": 14
} | def test_unknown_device(self) -> None:
url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote(
self.other_user
)
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.a... | |
6,716 | 37,029 | 317 | examples/research_projects/codeparrot/scripts/human_eval.py | 96 | 46 | def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
gen_token_dict = defaultdict(list) # dict of list of generated tokens
for step, batch in tqdm(enumerate(dataloader)):
with torch.no_grad():
gen_kwargs["stopping_criteria"][0].start_lengt... | Jia multi gpu eval (#16428)
* add simple multi gpu complet
* add human_eval_multi_gpu
* use copy strategy to distribute across gpu, to avoid padding
* add doc string
* update code style
* use task id to arrange output
* truncate input to avoid zero pad
* Stop the copy mechanism
* update style
... | complete_code | 4868a830db5f19f56712f540979d637368221d50 | transformers | human_eval.py | 17 | 23 | https://github.com/huggingface/transformers.git | 6 | 246 | 0 | 66 | 387 | Python | {
"docstring": "Generate multiple codes for each task in the dataset. This function leverage accelerator to distribute\n the processing to multiple GPUs.\n dataloader, a wrapper around a TokenizeDataset objectm is supposed to send all the prompts from\n the evalution dataset to the modelm as the following:\n... | def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
gen_token_dict = defaultdict(list) # dict of list of generated tokens
for step, batch in tqdm(enumerate(dataloader)):
with torch.no_grad():
gen_kwargs["stopping_criteria"][0].start_lengt... | |
13,993 | 65,710 | 20 | erpnext/crm/doctype/contract/contract.py | 27 | 6 | def get_status(start_date, end_date):
if not end_date:
return "Active"
start_date = getdate(start_date)
end_date = getdate(end_date)
now_date = getdate(nowdate())
return "Active" if start_date <= now_date <= end_date els | style: format code with black | get_status | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | contract.py | 10 | 7 | https://github.com/frappe/erpnext.git | 3 | 44 | 0 | 18 | 78 | Python | {
"docstring": "\n\tGet a Contract's status based on the start, current and end dates\n\n\tArgs:\n\t start_date (str): The start date of the contract\n\t end_date (str): The end date of the contract\n\n\tReturns:\n\t str: 'Active' if within range, otherwise 'Inactive'\n\t",
"language": "en",
... | def get_status(start_date, end_date):
if not end_date:
return "Active"
start_date = getdate(start_date)
end_date = getdate(end_date)
now_date = getdate(nowdate())
return "Active" if start_date <= now_date <= end_date else "Inactive"
| |
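Editor's note: the status helper above is essentially self-contained; here is a frappe-free usage sketch with plain `datetime.date` standing in for `getdate`/`nowdate`.

from datetime import date

def get_status(start_date, end_date, today=None):
    if not end_date:
        return "Active"
    today = today or date.today()
    return "Active" if start_date <= today <= end_date else "Inactive"

print(get_status(date(2024, 1, 1), date(2024, 12, 31), today=date(2024, 6, 1)))  # Active
print(get_status(date(2024, 1, 1), date(2024, 12, 31), today=date(2025, 6, 1)))  # Inactive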
2,946 | 19,358 | 40 | PathPlanning/CubicSpline/cubic_spline_planner.py | 12 | 7 | def calc_position(self, s):
x = self.sx.calc_position(s)
y = self.sy.calc_positi | enhance cubic spline path doc (#698)
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path doc
* enhance cublic spline path d... | calc_position | def289b723e9216830c2a7b2577cb31b55710167 | PythonRobotics | cubic_spline_planner.py | 9 | 4 | https://github.com/AtsushiSakai/PythonRobotics.git | 1 | 32 | 0 | 10 | 53 | Python | {
"docstring": "\n calc position\n\n Parameters\n ----------\n s : float\n distance from the start point. if `s` is outside the data point's\n range, return None.\n\n Returns\n -------\n x : float\n x position for given s.\n y : ... | def calc_position(self, s):
x = self.sx.calc_position(s)
y = self.sy.calc_position(s)
return x, y
| |
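Editor's note: the planner above evaluates two 1-D splines x(s) and y(s) at an arc-length parameter s. A rough equivalent with SciPy's `CubicSpline`; the sample points below are illustrative, and the out-of-range behaviour follows the docstring's "return None".

import numpy as np
from scipy.interpolate import CubicSpline

s = np.array([0.0, 1.0, 2.0, 3.0])         # arc-length samples
sx = CubicSpline(s, [0.0, 1.0, 2.0, 3.0])  # x(s)
sy = CubicSpline(s, [0.0, 1.0, 0.0, 1.0])  # y(s)

def calc_position(query_s):
    if query_s < s[0] or query_s > s[-1]:
        return None, None                   # outside the data point range
    return float(sx(query_s)), float(sy(query_s))

print(calc_position(1.5))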
56,482 | 221,712 | 156 | python3.10.4/Lib/contextlib.py | 55 | 10 | def push(self, exit):
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods.
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
# Not a context manager, ... | add python 3.10.4 for windows | push | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | contextlib.py | 10 | 9 | https://github.com/XX-net/XX-Net.git | 2 | 42 | 0 | 46 | 75 | Python | {
"docstring": "Registers a callback with the standard __exit__ method signature.\n\n Can suppress exceptions the same way __exit__ method can.\n Also accepts any object with an __exit__ method (registering a call\n to the method instead of the object itself).\n ",
"language": "en",
"n... | def push(self, exit):
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods.
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
# Not a context manager, ... | |
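Editor's note: this record is the standard-library `ExitStack.push`. A small usage example of the behaviour its docstring describes: registering a bare callback with the `__exit__` signature, which can suppress an exception by returning True, exactly as a context manager's `__exit__` would.

from contextlib import ExitStack

def swallow_value_errors(exc_type, exc, tb):
    return exc_type is ValueError       # True -> suppress the exception

with ExitStack() as stack:
    stack.push(swallow_value_errors)
    raise ValueError("suppressed by the pushed callback")
print("still running")                   # reached: the ValueError was swallowed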
104,431 | 305,647 | 57 | homeassistant/components/mpd/media_player.py | 14 | 6 | async def async_media_play(self) -> None:
if se | Improve entity type hints [m] (#77816) | async_media_play | 6355e682fa4aeb526570597d919ad1fb76755b9a | core | media_player.py | 12 | 6 | https://github.com/home-assistant/core.git | 2 | 37 | 0 | 13 | 69 | Python | {
"docstring": "Service to send the MPD the command for play/pause.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | async def async_media_play(self) -> None:
if self._status["state"] == "pause":
await self._client.pause(0)
else:
await self._client.play()
| |
14,439 | 67,193 | 70 | erpnext/regional/report/datev/datev.py | 109 | 38 | def download_datev_csv(filters):
if isinstance(filters, str):
filters = json.loads(filters)
validate(filters)
company = filters.get("company")
fiscal_year = get_fiscal_year(date=filters.get("from_date"), company=company)
filters["fiscal_year_start"] = fiscal_year[1]
# set chart of accounts used
coa = frap... | style: format code with black | download_datev_csv | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | datev.py | 13 | 38 | https://github.com/frappe/erpnext.git | 4 | 248 | 0 | 74 | 421 | Python | {
"docstring": "\n\tProvide accounting entries for download in DATEV format.\n\n\tValidate the filters, get the data, produce the CSV file and provide it for\n\tdownload. Can be called like this:\n\n\tGET /api/method/erpnext.regional.report.datev.datev.download_datev_csv\n\n\tArguments / Params:\n\tfilters -- dict of... | def download_datev_csv(filters):
if isinstance(filters, str):
filters = json.loads(filters)
validate(filters)
company = filters.get("company")
fiscal_year = get_fiscal_year(date=filters.get("from_date"), company=company)
filters["fiscal_year_start"] = fiscal_year[1]
# set chart of accounts used
coa = frap... | |
7,606 | 42,544 | 55 | nltk/parse/util.py | 28 | 10 | def taggedsent_to_conll(sentence):
| Docstring tests (#3050)
* fixed pytests
* fixed more pytests
* fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py
* fixed pytests (mainly multiline or rounding issues)
* fixed treebank pytests, removed test for return_string=True (deprecated)
* fixed destructive.py... | taggedsent_to_conll | 8a4cf5d94eb94b6427c5d1d7907ba07b119932c5 | nltk | util.py | 12 | 5 | https://github.com/nltk/nltk.git | 2 | 64 | 0 | 22 | 109 | Python | {
"docstring": "\n A module to convert a single POS tagged sentence into CONLL format.\n\n >>> from nltk import word_tokenize, pos_tag\n >>> text = \"This is a foobar sentence.\"\n >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE\n ... \tprint(line, en... | def taggedsent_to_conll(sentence):
for (i, (word, tag)) in enumerate(sentence, start=1):
input_str = [str(i), word, "_", tag, tag, "_", "0", "a", "_", "_"]
input_str = "\t".join(input_str) + "\n"
yield input_str
| |
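Editor's note: a self-contained copy of the converter above (no nltk install needed), followed by the kind of usage its doctest shows.

def taggedsent_to_conll(sentence):
    # One CONLL-style line per (word, tag) pair, with placeholder fields.
    for i, (word, tag) in enumerate(sentence, start=1):
        fields = [str(i), word, "_", tag, tag, "_", "0", "a", "_", "_"]
        yield "\t".join(fields) + "\n"

for line in taggedsent_to_conll([("This", "DT"), ("is", "VBZ"), ("fine", "JJ"), (".", ".")]):
    print(line, end="")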
51,596 | 206,637 | 51 | django/utils/encoding.py | 19 | 8 | def get_system_encoding():
try:
encoding = locale.getdefaultlocale()[1] or "ascii"
codecs.lookup(en | Refs #33476 -- Reformatted code with Black. | get_system_encoding | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | encoding.py | 12 | 7 | https://github.com/django/django.git | 3 | 33 | 0 | 14 | 71 | Python | {
"docstring": "\n The encoding of the default system locale. Fallback to 'ascii' if the\n #encoding is unsupported by Python or could not be determined. See tickets\n #10335 and #5846.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 27,
"vocab_size": 26
} | def get_system_encoding():
try:
encoding = locale.getdefaultlocale()[1] or "ascii"
codecs.lookup(encoding)
except Exception:
encoding = "ascii"
return encoding
DEFAULT_LOCALE_ENCODING = get_system_encoding()
| |
16,728 | 77,977 | 47 | wagtail/contrib/modeladmin/options.py | 11 | 9 | def get_menu_item(self):
if self.modeladmin_instances:
submenu = Menu(items=self.g | Deprecate wagtail.contrib.modeladmin.menus.SubMenu in favour of wagtail.admin.menu.Menu
The Menu class was not originally designed to accept menu items at constructor time (instead requiring them to be passed via hooks); ModelAdmin's SubMenu class patched this functionality in, and the documentation for extending admi... | get_menu_item | b8a9a2d319b06fc2318d68d05b5a6cdf85b5b33d | wagtail | options.py | 13 | 4 | https://github.com/wagtail/wagtail.git | 2 | 36 | 0 | 11 | 60 | Python | {
"docstring": "\n Utilised by Wagtail's 'register_menu_item' hook to create a menu\n for this group with a submenu linking to listing pages for any\n associated ModelAdmin instances\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 24,
"vocab_size": 21
} | def get_menu_item(self):
if self.modeladmin_instances:
submenu = Menu(items=self.get_submenu_items())
return GroupMenuItem(self, self.get_menu_order(), submenu)
| |
24,258 | 110,702 | 73 | lib/matplotlib/backend_bases.py | 27 | 17 | def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
path, transform = self._get_text_path_transform(
| Soft deprecate the textpath module (import from text instead)
The textpath module was created in 2009, but the status has
been a bit vague with many examples and exisiting code found
on the internet importing from text instead.
In this PR everything is changed to point at text, although textpath
is still available fo... | _draw_text_as_path | 9b8a598d00a4fcf9579415586053583ef80a1add | matplotlib | backend_bases.py | 8 | 6 | https://github.com/matplotlib/matplotlib.git | 1 | 69 | 0 | 20 | 94 | Python | {
"docstring": "\n Draw the text by converting them to paths using `.TextToPath`.\n\n Parameters\n ----------\n x : float\n The x location of the text in display coords.\n y : float\n The y location of the text baseline in display coords.\n s : str\n ... | def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
path, transform = self._get_text_path_transform(
x, y, s, prop, angle, ismath)
color = gc.get_rgb()
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
| |
6,912 | 38,114 | 341 | examples/research_projects/lxmert/modeling_frcnn.py | 128 | 27 | def __call__(self, match_quality_matrix):
assert match_quality_matrix.dim() == 2
if match_quality_matrix.numel() == 0:
default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64)
# When no gt boxes exist, we define IOU = 0 and t... | Black preview (#17217)
* Black preview
* Fixup too!
* Fix check copies
* Use the same version as the CI
* Bump black | __call__ | afe5d42d8d1d80af911ed980c2936bfe887078f6 | transformers | modeling_frcnn.py | 13 | 17 | https://github.com/huggingface/transformers.git | 4 | 190 | 0 | 100 | 294 | Python | {
"docstring": "\n Args:\n match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted\n elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_match... | def __call__(self, match_quality_matrix):
assert match_quality_matrix.dim() == 2
if match_quality_matrix.numel() == 0:
default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64)
# When no gt boxes exist, we define IOU = 0 and t... | |
else: | 55,318 | 218,453 | 25 | python3.10.4/Lib/inspect.py | 9 | 6 | def ismemberdescriptor(object):
return isinstance(object, types.MemberDescriptorType)
else:
# | add python 3.10.4 for windows | ismemberdescriptor | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | inspect.py | 8 | 2 | https://github.com/XX-net/XX-Net.git | 1 | 15 | 1 | 9 | 30 | Python | {
"docstring": "Return true if the object is a member descriptor.\n\n Member descriptors are specialized descriptors defined in extension\n modules.",
"language": "en",
"n_whitespaces": 31,
"n_words": 18,
"vocab_size": 17
} | def ismemberdescriptor(object):
return isinstance(object, types.MemberDescriptorType)
else:
# Other implementations |
35,336 | 153,268 | 142 | modin/core/dataframe/base/exchange/dataframe_protocol/utils.py | 62 | 22 | def pandas_dtype_to_arrow_c(dtype) -> str:
if isinstance(dtype, pandas.CategoricalDtype):
return ArrowCTypes.INT64
elif dtype == np.dtype("O"):
return ArrowCTypes.STRING
format_str = getattr(ArrowCTypes, dtype.name.upper(), None)
if format_str is not None:
return format_str... | FEAT-#4245: Define base interface for dataframe exchange protocol (#4246)
Signed-off-by: Igoshev, Yaroslav <yaroslav.igoshev@intel.com>
Co-authored-by: Dmitry Chigarev <dmitry.chigarev@intel.com> | pandas_dtype_to_arrow_c | fc539c3d70a40c9d7aabc5c50dd7280aa5e4637e | modin | utils.py | 13 | 27 | https://github.com/modin-project/modin.git | 5 | 107 | 0 | 48 | 177 | Python | {
"docstring": "\n Represent pandas `dtype` as a format string in Apache Arrow C notation.\n\n Parameters\n ----------\n dtype : np.dtype\n Datatype of pandas DataFrame to represent.\n\n Returns\n -------\n str\n Format string in Apache Arrow C notation of the given `dtype`.\n ",... | def pandas_dtype_to_arrow_c(dtype) -> str:
if isinstance(dtype, pandas.CategoricalDtype):
return ArrowCTypes.INT64
elif dtype == np.dtype("O"):
return ArrowCTypes.STRING
format_str = getattr(ArrowCTypes, dtype.name.upper(), None)
if format_str is not None:
return format_str... | |
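Editor's note: a rough sketch of the dtype-to-format-string mapping this helper performs, using format codes from the Apache Arrow C data interface ("l" = int64, "g" = float64, "b" = bool, "u" = utf-8 string); the lookup table here is deliberately abbreviated and is not Modin's full `ArrowCTypes` enumeration.

import numpy as np

ARROW_C = {"int64": "l", "float64": "g", "bool": "b", "O": "u"}  # abbreviated table

def dtype_to_arrow_c(dtype) -> str:
    key = "O" if dtype == np.dtype("O") else dtype.name
    try:
        return ARROW_C[key]
    except KeyError:
        raise NotImplementedError(f"Conversion of {dtype} to Arrow C format string is not implemented.")

print(dtype_to_arrow_c(np.dtype("int64")))    # 'l'
print(dtype_to_arrow_c(np.dtype("float64")))  # 'g'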
40,759 | 172,105 | 32 | pandas/core/dtypes/inference.py | 16 | 4 | def is_file_like(obj) -> bool:
if not (hasattr(obj, "read") or hasattr(obj, "write")):
return False
| Fix some dosctring RT02 error (#50197) | is_file_like | bce995817caf00ab5e82cb4cf1b540f1530cf4ea | pandas | inference.py | 11 | 32 | https://github.com/pandas-dev/pandas.git | 3 | 38 | 0 | 15 | 67 | Python | {
"docstring": "\n Check if the object is a file-like object.\n\n For objects to be considered file-like, they must\n be an iterator AND have either a `read` and/or `write`\n method as an attribute.\n\n Note: file-like objects must be iterable, but\n iterable objects need not be file-like.\n\n Pa... | def is_file_like(obj) -> bool:
if not (hasattr(obj, "read") or hasattr(obj, "write")):
return False
return bool(hasattr(obj, "__iter__"))
| |
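Editor's note: quick sanity checks for the predicate above. A `StringIO` has read/write methods and is iterable, so it qualifies; a plain list is iterable but has neither method.

import io

def is_file_like(obj) -> bool:
    if not (hasattr(obj, "read") or hasattr(obj, "write")):
        return False
    return bool(hasattr(obj, "__iter__"))

print(is_file_like(io.StringIO("abc")))  # True
print(is_file_like([1, 2, 3]))           # False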
645 | 4,252 | 23 | octavia-cli/octavia_cli/apply/resources.py | 9 | 9 | def update(self) -> Union[SourceRead, DestinationRead, ConnectionRead]:
return self._create_or_update(self._update_fn, self.update_payload)
| 🐙 octavia-cli: `apply` connections (#10881) | update | 56bf982cb96f831fe04f5e44a92ee4a669b9e16a | airbyte | resources.py | 8 | 7 | https://github.com/airbytehq/airbyte.git | 1 | 28 | 0 | 9 | 43 | Python | {
"docstring": "Public function to update the resource on the remote Airbyte instance.\n\n Returns:\n Union[SourceRead, DestinationRead, ConnectionRead]: The updated resource.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 18,
"vocab_size": 17
} | def update(self) -> Union[SourceRead, DestinationRead, ConnectionRead]:
return self._create_or_update(self._update_fn, self.update_payload)
| |
71,802 | 247,638 | 347 | tests/handlers/test_oidc.py | 90 | 17 | def test_callback_session(self) -> None:
request = Mock(spec=["args", "getCookie", "cookies"])
# Missing cookie
request.args = {}
request.getCookie.return_value = None
self.get_success(self.handler.handle_oidc_callback(request))
self.assertRenderedError("missing... | Add type hints to some tests/handlers files. (#12224) | test_callback_session | 5dd949bee6158a8b651db9f2ae417a62c8184bfd | synapse | test_oidc.py | 11 | 31 | https://github.com/matrix-org/synapse.git | 1 | 241 | 0 | 42 | 419 | Python | {
"docstring": "The callback verifies the session presence and validity",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def test_callback_session(self) -> None:
request = Mock(spec=["args", "getCookie", "cookies"])
# Missing cookie
request.args = {}
request.getCookie.return_value = None
self.get_success(self.handler.handle_oidc_callback(request))
self.assertRenderedError("missing... | |
11,867 | 59,100 | 140 | src/prefect/filesystems.py | 35 | 18 | def _create_repo_url(self) -> str:
url_components = urllib.parse.urlparse(self.repository_url)
if url_components.scheme == "https" and self.credentials is not None:
repo_url = url_components.netloc + url_components.path
updated_components = url_components._replace(
... | Add private repos | _create_repo_url | bbee097653559003fa0db61ab00f1ff8567eea9a | prefect | filesystems.py | 16 | 16 | https://github.com/PrefectHQ/prefect.git | 3 | 73 | 0 | 29 | 142 | Python | {
"docstring": "Format the URL provided to the `git clone` command.\n\n For private repos: https://<oauth-key>@github.com/<username>/<repo>.git\n All other repos should be the same as `self.repository`.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 22,
"vocab_size": 20
} | def _create_repo_url(self) -> str:
url_components = urllib.parse.urlparse(self.repository_url)
if url_components.scheme == "https" and self.credentials is not None:
repo_url = url_components.netloc + url_components.path
updated_components = url_components._replace(
... | |
51,882 | 207,149 | 20 | tests/admin_filters/tests.py | 6 | 6 | def test_lookup_with_dynamic_value(self):
| Refs #33476 -- Reformatted code with Black. | test_lookup_with_dynamic_value | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 8 | 14 | https://github.com/django/django.git | 1 | 86 | 0 | 6 | 25 | Python | {
"docstring": "\n Ensure SimpleListFilter can access self.value() inside the lookup.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 8
} | def test_lookup_with_dynamic_value(self):
modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site)
| |
6,164 | 33,821 | 952 | tests/test_tokenization_common.py | 144 | 32 | def test_batch_encode_dynamic_overflowing(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__... | Fix custom tokenizers test (#19052)
* Fix CI for custom tokenizers
* Add nightly tests
* Run CI, run!
* Fix paths
* Typos
* Fix test | test_batch_encode_dynamic_overflowing | f7ce4f1ff789c11f129597a1171b5d549d102e09 | transformers | test_tokenization_common.py | 17 | 46 | https://github.com/huggingface/transformers.git | 10 | 314 | 0 | 71 | 521 | Python | {
"docstring": "\n When calling batch_encode with multiple sequence it can returns different number of\n overflowing encoding for each sequence:\n [\n Sequence 1: [Encoding 1, Encoding 2],\n Sequence 2: [Encoding 1],\n Sequence 3: [Encoding 1, Encoding 2, ... Encoding N... | def test_batch_encode_dynamic_overflowing(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__... | |
@keras_export("keras.optimizers.get") | 81,339 | 275,215 | 300 | keras/optimizers/__init__.py | 106 | 34 | def deserialize(config, custom_objects=None):
# loss_scale_optimizer has a direct dependency of optimizer, import here
# rather than top to avoid the cyclic dependency.
from keras.mixed_precision import (
loss_scale_optimizer,
) # pylint: disable=g-import-not-at-top
all_classes = {
... | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | deserialize | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | __init__.py | 12 | 29 | https://github.com/keras-team/keras.git | 2 | 156 | 1 | 92 | 275 | Python | {
"docstring": "Inverse of the `serialize` function.\n\n Args:\n config: Optimizer configuration dictionary.\n custom_objects: Optional dictionary mapping names (strings) to custom\n objects (classes and functions) to be considered during deserialization.\n\n Returns:\n A Keras Opt... | def deserialize(config, custom_objects=None):
# loss_scale_optimizer has a direct dependency of optimizer, import here
# rather than top to avoid the cyclic dependency.
from keras.mixed_precision import (
loss_scale_optimizer,
) # pylint: disable=g-import-not-at-top
all_classes = {
... |
47,689 | 196,189 | 40 | sympy/combinatorics/permutations.py | 12 | 7 | def commutes_with(self, other):
a = s | Updated import locations | commutes_with | 498015021131af4dbb07eb110e5badaba8250c7b | sympy | permutations.py | 7 | 4 | https://github.com/sympy/sympy.git | 1 | 25 | 0 | 11 | 41 | Python | {
"docstring": "\n Checks if the elements are commuting.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> a = Permutation([1, 4, 3, 0, 2, 5])\n >>> b = Permutation([0, 1, 2, 3, 4, 5])\n >>> a.commutes_with(b)\n True\n >>> ... | def commutes_with(self, other):
a = self.array_form
b = other.array_form
return _af_commutes_with(a, b)
| |
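Editor's note: in array form, composing permutations is just indexing, and two permutations commute when both composition orders agree. A dependency-free sketch of the check `_af_commutes_with` performs; the composition convention `(a*b)[i] = a[b[i]]` is assumed to match SymPy's.

def compose(a, b):
    return [a[b[i]] for i in range(len(a))]

def commutes_with(a, b):
    return compose(a, b) == compose(b, a)

a = [1, 4, 3, 0, 2, 5]
identity = [0, 1, 2, 3, 4, 5]
print(commutes_with(a, identity))            # True: everything commutes with identity
print(commutes_with(a, [1, 0, 2, 3, 4, 5]))  # False in general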
3,396 | 20,492 | 28 | pipenv/patched/notpip/_vendor/pygments/styles/__init__.py | 12 | 5 | def get_all_styles():
yield from STYLE_MAP
for name, _ in find_plugin_styles():
yield | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | get_all_styles | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | __init__.py | 8 | 4 | https://github.com/pypa/pipenv.git | 2 | 19 | 0 | 11 | 35 | Python | {
"docstring": "Return a generator for all styles by name,\n both builtin and plugin.",
"language": "en",
"n_whitespaces": 14,
"n_words": 12,
"vocab_size": 12
} | def get_all_styles():
yield from STYLE_MAP
for name, _ in find_plugin_styles():
yield name
| |
7,231 | 39,439 | 37 | recommenders/utils/python_utils.py | 18 | 12 | def lift(cooccurrence):
diag_rows, diag_cols = _get_row_and_column_matrix(co | Add new item similarity metrics for SAR (#1754)
* Add mutual information similarity in SAR
* Add lexicographers mutual information similarity for SAR
* Add cosine similarity for SAR
* Add inclusion index for SAR
* Typos
* Change SARSingleNode to SAR
* Convert item similarity matrix to np.array
* U... | lift | 1d7341e93d1f03387699fb3c6ae0b6c0e464296f | recommenders | python_utils.py | 11 | 5 | https://github.com/microsoft/recommenders.git | 1 | 48 | 0 | 17 | 85 | Python | {
"docstring": "Helper method to calculate the Lift of a matrix of\n co-occurrences. In comparison with basic co-occurrence and Jaccard\n similarity, lift favours discoverability and serendipity, as\n opposed to co-occurrence that favours the most popular items, and\n Jaccard that is a compromise between ... | def lift(cooccurrence):
diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal())
with np.errstate(invalid="ignore", divide="ignore"):
result = cooccurrence / (diag_rows * diag_cols)
return np.array(result)
| |
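Editor's note: a numerical sketch of the lift computation above on a toy 2x2 item co-occurrence matrix, where lift(i, j) = cooccur(i, j) / (cooccur(i, i) * cooccur(j, j)).

import numpy as np

cooccurrence = np.array([[4.0, 2.0],
                         [2.0, 3.0]])
diag = cooccurrence.diagonal()
with np.errstate(invalid="ignore", divide="ignore"):
    lift = cooccurrence / (diag[:, None] * diag[None, :])
print(lift)  # off-diagonal entries: 2 / (4 * 3) = 0.1666...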
22,841 | 107,629 | 37 | lib/matplotlib/artist.py | 12 | 4 | def update(self, props):
return self._update_props(
props, "{cls.__name__!r} object has no property {prop_name!r}")
| Clarify error message for bad keyword arguments.
`plot([], [], foo=42)` previously emitted
```
'Line2D' object has no property 'foo'
```
which refers to the Matplotlib-specific concept of "properties". It now
instead emits
```
Line2D.set() got an unexpected keyword argument 'foo'
```
which is modeled after the standa... | update | d69be2554cf6d1ac711bf433b1d6f176e3290d4f | matplotlib | artist.py | 8 | 3 | https://github.com/matplotlib/matplotlib.git | 1 | 17 | 0 | 12 | 30 | Python | {
"docstring": "\n Update this artist's properties from the dict *props*.\n\n Parameters\n ----------\n props : dict\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 13,
"vocab_size": 12
} | def update(self, props):
return self._update_props(
props, "{cls.__name__!r} object has no property {prop_name!r}")
| |
30,007 | 133,396 | 75 | python/ray/util/sgd/torch/worker_group.py | 22 | 11 | def new_workers_size(self):
remote_resources = ray.available_resources()
max_remote_workers = self._max_workers
new_remote_workers = min(remote_resources.get("CPU" | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | new_workers_size | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | worker_group.py | 13 | 7 | https://github.com/ray-project/ray.git | 2 | 55 | 0 | 16 | 92 | Python | {
"docstring": "Returns number of workers to create based on available resources.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def new_workers_size(self):
remote_resources = ray.available_resources()
max_remote_workers = self._max_workers
new_remote_workers = min(remote_resources.get("CPU", 0), max_remote_workers)
if self._use_gpu:
new_remote_workers = min(remote_resources.get("GPU", 0), new... | |
52,779 | 209,787 | 146 | scapy/arch/windows/__init__.py | 50 | 10 | def setmonitor(self, enable=True):
# type: (bool) -> bool
# We must reset the monitor cache
if enable:
res = self.setmode('monitor')
else:
res = self.setmode('managed')
if not res:
log_runtime.error("Npcap WlanHelper returned with an e... | [Hinty] Core typing: windows (#3684)
* Core typing: windows
Co-authored-by: Pierre <pierre@droids-corp.org> | setmonitor | a2b7a28faff1db058dd22ce097a268e0ad5d1d33 | scapy | __init__.py | 12 | 10 | https://github.com/secdev/scapy.git | 4 | 66 | 0 | 40 | 117 | Python | {
"docstring": "Alias for setmode('monitor') or setmode('managed')\n Only available with Npcap",
"language": "en",
"n_whitespaces": 15,
"n_words": 9,
"vocab_size": 9
} | def setmonitor(self, enable=True):
# type: (bool) -> bool
# We must reset the monitor cache
if enable:
res = self.setmode('monitor')
else:
res = self.setmode('managed')
if not res:
log_runtime.error("Npcap WlanHelper returned with an e... | |
57,477 | 225,607 | 393 | albumentations/augmentations/geometric/rotate.py | 195 | 26 | def _rotated_rect_with_max_area(h, w, angle):
angle = math.radians(angle)
width_is_longer = w >= h
side_long, side_short = (w, h) if width_is_longer else (h, w)
# since the solutions for angle, -angl | add `crop_border` option to Rotate (#1214) | _rotated_rect_with_max_area | a4d33e180c4407990afa1fc03aa079718d738ebd | albumentations | rotate.py | 14 | 17 | https://github.com/albumentations-team/albumentations.git | 5 | 233 | 0 | 102 | 347 | Python | {
"docstring": "\n Given a rectangle of size wxh that has been rotated by 'angle' (in\n degrees), computes the width and height of the largest possible\n axis-aligned rectangle (maximal area) within the rotated rectangle.\n\n Code from: https://stackoverflow.com/questions/16702966/rotate-i... | def _rotated_rect_with_max_area(h, w, angle):
angle = math.radians(angle)
width_is_longer = w >= h
side_long, side_short = (w, h) if width_is_longer else (h, w)
# since the solutions for angle, -angle and 180-angle are all the same,
# it is sufficient to look at the fi... | |
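Editor's note: the geometry above is truncated mid-comment. Below is a sketch of the well-known closed-form solution the docstring's Stack Overflow link describes, with two cases (the inscribed rectangle is constrained by two sides, or by all four); it is a reconstruction of that published formula, not necessarily byte-identical to the albumentations code.

import math

def rotated_rect_with_max_area(h, w, angle_deg):
    angle = math.radians(angle_deg)
    width_is_longer = w >= h
    side_long, side_short = (w, h) if width_is_longer else (h, w)
    sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
    if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10:
        # half-constrained case: two crop corners touch the longer side
        x = 0.5 * side_short
        wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
    else:
        # fully constrained case: the crop touches all four sides
        cos_2a = cos_a * cos_a - sin_a * sin_a
        wr = (w * cos_a - h * sin_a) / cos_2a
        hr = (h * cos_a - w * sin_a) / cos_2a
    return hr, wr

print(rotated_rect_with_max_area(100, 200, 30))  # ~ (57.7, 100.0)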
3,362 | 20,426 | 84 | pipenv/patched/notpip/_vendor/pygments/lexer.py | 29 | 10 | def using(_other, **kwargs):
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
| check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for p... | using | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | lexer.py | 13 | 13 | https://github.com/pypa/pipenv.git | 4 | 69 | 0 | 22 | 107 | Python | {
"docstring": "\n Callback that processes the match with a different lexer.\n\n The keyword arguments are forwarded to the lexer, except `state` which\n is handled separately.\n\n `state` specifies the state that the new lexer will start in, and can\n be an enumerable such as ('root', 'inline', 'strin... | def using(_other, **kwargs):
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this: | |
116,975 | 319,613 | 144 | src/documents/tests/test_api.py | 23 | 19 | def test_unset_document_storage_path(self):
self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)
bulk_edit.set_storage_path(
[self.doc1.id],
self.sp1.id,
)
self.assertEqual(Document.objects.filter(storage_path=None).count(), 4)
... | Feature: Dynamic document storage pathes (#916)
* Added devcontainer
* Add feature storage pathes
* Exclude tests and add versioning
* Check escaping
* Check escaping
* Check quoting
* Echo
* Escape
* Escape :
* Double escape \
* Escaping
* Remove if
* Escape colon
* Missing \
... | test_unset_document_storage_path | 69ef26dab04d51e7e102dcb33cd98ddc6ad975fd | paperless-ngx | test_api.py | 12 | 15 | https://github.com/paperless-ngx/paperless-ngx.git | 1 | 136 | 0 | 17 | 215 | Python | {
"docstring": "\n GIVEN:\n - 4 documents without defined storage path\n - 1 document with a defined storage\n WHEN:\n - Bulk edit called to remove storage path from 1 document\n THEN:\n - Single document storage path removed\n ",
"language": "... | def test_unset_document_storage_path(self):
self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)
bulk_edit.set_storage_path(
[self.doc1.id],
self.sp1.id,
)
self.assertEqual(Document.objects.filter(storage_path=None).count(), 4)
... | |
26,314 | 118,602 | 25 | lib/tests/streamlit/cache_spinner_test.py | 4 | 6 | def test_with_spinner(self):
| Rename and refactor `Report` machinery (#4141)
This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app". | test_with_spinner | 704eab3478cf69847825b23dabf15813a8ac9fa2 | streamlit | cache_spinner_test.py | 10 | 3 | https://github.com/streamlit/streamlit.git | 1 | 21 | 0 | 4 | 39 | Python | {
"docstring": "If the show_spinner flag is set, there should be one element in the\n report queue.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 15,
"vocab_size": 14
} | def test_with_spinner(self):
function_with_spinner()
self.assertFalse(self.forward_msg_queue.is_empty())
| |
72,586 | 249,079 | 337 | tests/rest/admin/test_device.py | 82 | 26 | def test_update_device_too_long_display_name(self) -> None:
| Use literals in place of `HTTPStatus` constants in tests (#13469) | test_update_device_too_long_display_name | c97042f7eef3748e17c90e48a4122389a89c4735 | synapse | test_device.py | 14 | 29 | https://github.com/matrix-org/synapse.git | 1 | 159 | 0 | 61 | 257 | Python | {
"docstring": "\n Update a device with a display name that is invalid (too long).\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 11
} | def test_update_device_too_long_display_name(self) -> None:
# Set iniital display name.
update = {"display_name": "new display"}
self.get_success(
self.handler.update_device(
self.other_user, self.other_user_device_id, update
)
)
... |