[dev.boringcrypto] all: merge master into dev.boringcrypto

Change-Id: I0596a40722bf62952bd2eba85ccf3f104de589e4
Dmitri Shuralyov 2020-11-17 18:32:51 -05:00
commit 0985c1bd2d
1335 changed files with 63078 additions and 77546 deletions


@ -31,6 +31,7 @@ Aaron Cannon <cannona@fireantproductions.com>
Aaron France <aaron.l.france@gmail.com>
Aaron Jacobs <jacobsa@google.com>
Aaron Kemp <kemp.aaron@gmail.com>
Aaron Patterson <tenderlove@ruby-lang.org>
Aaron Stein <aaronstein12@gmail.com>
Aaron Torres <tcboox@gmail.com>
Aaron Zinman <aaron@azinman.com>
@ -58,6 +59,7 @@ Adrian Hesketh <adrianhesketh@hushmail.com>
Adrian Nos <nos.adrian@gmail.com>
Adrian O'Grady <elpollouk@gmail.com>
Adrien Bustany <adrien-xx-google@bustany.org>
Adrien Delorme <adrien.delorme@icloud.com>
Adrien Petel <peteladrien@gmail.com>
Aécio Júnior <aeciodantasjunior@gmail.com>
Aeneas Rekkas (arekkas) <aeneas@ory.am>
@ -114,6 +116,7 @@ Alex Zhirov <azhirov@google.com>
Alexander Demakin <alexander.demakin@gmail.com>
Alexander Döring <email@alexd.ch>
Alexander F Rødseth <alexander.rodseth@appeartv.com>
Alexander Greim <alexxx@iltempo.de>
Alexander Guz <kalimatas@gmail.com>
Alexander Kauer <alexander@affine.space>
Alexander Kucherenko <alxkchr@gmail.com>
@ -122,6 +125,7 @@ Alexander Lourier <aml@rulezz.ru>
Alexander Menzhinsky <amenzhinsky@gmail.com>
Alexander Morozov <lk4d4math@gmail.com>
Alexander Neumann <alexander@bumpern.de>
Alexander Nohe <alex.nohe427@gmail.com>
Alexander Orlov <alexander.orlov@loxal.net>
Alexander Pantyukhin <apantykhin@gmail.com>
Alexander Polcyn <apolcyn@google.com>
@ -149,6 +153,7 @@ Alexey Semenyuk <alexsemenyuk88@gmail.com>
Alexis Hildebrandt <surryhill@gmail.com>
Alexis Hunt <lexer@google.com>
Alexis Imperial-Legrand <ail@google.com>
Ali Farooq <ali.farooq0@pm.me>
Ali Rizvi-Santiago <arizvisa@gmail.com>
Aliaksandr Valialkin <valyala@gmail.com>
Alif Rachmawadi <subosito@gmail.com>
@ -156,14 +161,17 @@ Allan Simon <allan.simon@supinfo.com>
Allen Li <ayatane@google.com>
Alok Menghrajani <alok.menghrajani@gmail.com>
Aman Gupta <aman@tmm1.net>
Amarjeet Anand <amarjeetanandsingh@gmail.com>
Amir Mohammad Saied <amir@gluegadget.com>
Amr Mohammed <merodiro@gmail.com>
Amrut Joshi <amrut.joshi@gmail.com>
An Long <aisk1988@gmail.com>
An Xiao <hac@zju.edu.cn>
Anand K. Mistry <anand@mistry.ninja>
Anders Pearson <anders@columbia.edu>
Anderson Queiroz <contato@andersonq.eti.br>
André Carvalho <asantostc@gmail.com>
André Martins <aanm90@gmail.com>
Andre Nathan <andrenth@gmail.com>
Andrea Nodari <andrea.nodari91@gmail.com>
Andrea Spadaccini <spadaccio@google.com>
@ -187,9 +195,11 @@ Andrew Braunstein <awbraunstein@gmail.com>
Andrew Bursavich <abursavich@gmail.com>
Andrew Ekstedt <andrew.ekstedt@gmail.com>
Andrew Etter <andrew.etter@gmail.com>
Andrew G. Morgan <agm@google.com>
Andrew Gerrand <adg@golang.org>
Andrew Harding <andrew@spacemonkey.com>
Andrew Jackura <ajackura@google.com>
Andrew Louis <alouis@digitalocean.com>
Andrew Lutomirski <andy@luto.us>
Andrew Medvedev <andrew.y.medvedev@gmail.com>
Andrew Pilloud <andrewpilloud@igneoussystems.com>
@ -219,6 +229,7 @@ Andy Lindeman <andy@lindeman.io>
Andy Maloney <asmaloney@gmail.com>
Andy Pan <panjf2000@gmail.com>
Andy Walker <walkeraj@gmail.com>
Andy Wang <cbeuw.andy@gmail.com>
Andzej Maciusovic <andzej.maciusovic@gmail.com>
Anfernee Yongkun Gui <anfernee.gui@gmail.com>
Angelo Bulfone <mbulfone@gmail.com>
@ -226,6 +237,7 @@ Anh Hai Trinh <anh.hai.trinh@gmail.com>
Anit Gandhi <anitgandhi@gmail.com>
Ankit Goyal <ankit3goyal@gmail.com>
Anmol Sethi <anmol@aubble.com>
Annirudh Prasad <annirudh@wandb.com>
Anschel Schaffer-Cohen <anschelsc@gmail.com>
Anthony Alves <cvballa3g0@gmail.com>
Anthony Canino <anthony.canino1@gmail.com>
@ -239,15 +251,18 @@ Anthony Woods <awoods@raintank.io>
Antoine GIRARD <sapk@sapk.fr>
Antoine Martin <antoine97.martin@gmail.com>
Anton Gyllenberg <anton@iki.fi>
Anton Kuklin <anton.a.kuklin@gmail.com>
Antonin Amand <antonin.amand@gmail.com>
Antonio Antelo <aantelov87@gmail.com>
Antonio Bibiano <antbbn@gmail.com>
Antonio Huete Jimenez <tuxillo@quantumachine.net>
Antonio Murdaca <runcom@redhat.com>
Antonio Troina <thoeni@gmail.com>
Anze Kolar <me@akolar.com>
Aofei Sheng <aofei@aofeisheng.com>
Apisak Darakananda <pongad@gmail.com>
Aram Hăvărneanu <aram@mgk.ro>
Araragi Hokuto <kanseihonbucho@protonmail.com>
Arash Bina <arash@arash.io>
Arda Güçlü <ardaguclu@gmail.com>
Areski Belaid <areski@gmail.com>
@ -273,6 +288,7 @@ Audrius Butkevicius <audrius.butkevicius@gmail.com>
Augusto Roman <aroman@gmail.com>
Aulus Egnatius Varialus <varialus@gmail.com>
Aurélien Rainone <aurelien.rainone@gmail.com>
Aurélio A. Heckert <aurium@gmail.com>
Austin Clements <austin@google.com> <aclements@csail.mit.edu>
Avi Flax <avi@timehop.com>
awaw fumin <awawfumin@gmail.com>
@ -315,6 +331,7 @@ Benoit Sigoure <tsunanet@gmail.com>
Berengar Lehr <Berengar.Lehr@gmx.de>
Berkant Ipek <41230766+0xbkt@users.noreply.github.com>
Bharath Thiruveedula <tbharath91@gmail.com>
Bhavin Gandhi <bhavin7392@gmail.com>
Bill Neubauer <wcn@golang.org> <wcn@google.com> <bill.neubauer@gmail.com>
Bill O'Farrell <billo@ca.ibm.com>
Bill Prin <waprin@google.com>
@ -322,6 +339,7 @@ Bill Thiede <couchmoney@gmail.com>
Bill Zissimopoulos <billziss@navimatics.com>
Billie Harold Cleek <bhcleek@gmail.com>
Billy Lynch <wlynch@google.com>
Billy Zaelani Malik <m.billyzaelani@gmail.com>
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
Bjorn Tillenius <bjorn@tillenius.me>
Bjorn Tipling <bjorn.tipling@gmail.com>
@ -331,12 +349,15 @@ Blake Mesdag <blakemesdag@gmail.com>
Blake Mizerany <blake.mizerany@gmail.com>
Blixt <me@blixt.nyc>
Bob Briski <rbriski@gmail.com>
Bob McNaughton <bobmcn@gmail.com>
Bob Potter <bobby.potter@gmail.com>
Bobby DeSimone <bobbydesimone@gmail.com>
Bobby Powers <bobbypowers@gmail.com>
Boqin Qin <bobbqqin@gmail.com>
Boris Nagaev <nagaev@google.com>
Borja Clemente <borja.clemente@gmail.com>
Brad Burch <brad.burch@gmail.com>
Brad Erickson <bderickson@gmail.com>
Brad Fitzpatrick <bradfitz@golang.org> <bradfitz@gmail.com>
Brad Garcia <bgarcia@golang.org>
Brad Jones <rbjones@google.com>
@ -351,6 +372,7 @@ Brandon Bennett <bbennett@fb.com>
Brandon Gilmore <varz@google.com>
Brandon Philips <brandon@ifup.org>
Brandon Ryan <bjryan19@gmail.com>
Brayden Cloud <bcloud@google.com>
Brendan Daniel Tracey <tracey.brendan@gmail.com>
Brendan O'Dea <bod@golang.org>
Brett Cannon <bcannon@gmail.com>
@ -390,6 +412,7 @@ Carlos Castillo <cookieo9@gmail.com>
Carlos Cirello <uldericofilho@gmail.com>
Carlos Eduardo <me@carlosedp.com>
Carlos Eduardo Seo <cseo@linux.vnet.ibm.com>
Carlos Iriarte <ciriarte@gmail.com>
Carlos Souza <carloshrsouza@gmail.com>
Carolyn Van Slyck <me@carolynvanslyck.com>
Carrie Bynon <cbynon@gmail.com>
@ -405,6 +428,7 @@ Chad Rosier <mrosier.qdt@qualcommdatacenter.com>
ChaiShushan <chaishushan@gmail.com>
Changkun Ou <hi@changkun.us>
Channing Kimble-Brown <channing@golang.org>
Chao Xu <xuchao@google.com>
Charles Fenwick Elliott <Charles@FenwickElliott.io>
Charles Kenney <charlesc.kenney@gmail.com>
Charles L. Dorian <cldorian@gmail.com>
@ -426,6 +450,7 @@ Chris Howey <howeyc@gmail.com>
Chris Hundt <hundt@google.com>
Chris Jones <chris@cjones.org> <chris.jones.yar@gmail.com>
Chris Kastorff <encryptio@gmail.com>
Chris Le Roy <brompwnie@users.noreply.github.com>
Chris Lennert <calennert@gmail.com>
Chris Liles <caveryliles@gmail.com>
Chris Manghane <cmang@golang.org>
@ -475,6 +500,7 @@ Conrad Meyer <cemeyer@cs.washington.edu>
Conrado Gouvea <conradoplg@gmail.com>
Constantin Konstantinidis <constantinkonstantinidis@gmail.com>
Corey Thomasson <cthom.lists@gmail.com>
Corne van der Plas <vdplas@gmail.com>
Cosmos Nicolaou <cnicolaou@google.com>
Costin Chirvasuta <ctin@google.com>
Craig Citro <craigcitro@google.com>
@ -506,9 +532,11 @@ Daniel Ingram <ingramds@appstate.edu>
Daniel Johansson <dajo2002@gmail.com>
Daniel Kerwin <d.kerwin@gini.net>
Daniel Krech <eikeon@eikeon.com>
Daniel Kumor <rdkumor@gmail.com>
Daniel Langner <s8572327@gmail.com>
Daniel Lidén <daniel.liden.87@gmail.com>
Daniel Lublin <daniel@lublin.se>
Daniel Mangum <georgedanielmangum@gmail.com>
Daniel Martí <mvdan@mvdan.cc>
Daniel Morsing <daniel.morsing@gmail.com>
Daniel Nadasi <dnadasi@google.com>
@ -519,6 +547,8 @@ Daniel Speichert <daniel@speichert.pl>
Daniel Theophanes <kardianos@gmail.com>
Daniel Upton <daniel@floppy.co>
Daniela Petruzalek <daniela.petruzalek@gmail.com>
Danish Dua <danishdua@google.com>
Danish Prakash <grafitykoncept@gmail.com>
Danny Rosseau <daniel.rosseau@gmail.com>
Daria Kolistratova <daria.kolistratova@intel.com>
Darien Raymond <admin@v2ray.com>
@ -542,6 +572,7 @@ David Brophy <dave@brophy.uk>
David Bürgin <676c7473@gmail.com>
David Calavera <david.calavera@gmail.com>
David Carlier <devnexen@gmail.com>
David Carter <fresco.raja@gmail.com>
David Chase <drchase@google.com>
David Covert <davidhcovert@gmail.com>
David Crawshaw <david.crawshaw@zentus.com> <crawshaw@google.com> <crawshaw@golang.org>
@ -550,6 +581,7 @@ David Finkel <david.finkel@gmail.com>
David Forsythe <dforsythe@gmail.com>
David G. Andersen <dave.andersen@gmail.com>
David Glasser <glasser@meteor.com>
David Golden <david@autopragmatic.com>
David Heuschmann <heuschmann.d@gmail.com>
David Howden <dhowden@gmail.com>
David Hubbard <dsp@google.com>
@ -574,6 +606,7 @@ David Volquartz Lebech <david@lebech.info>
David Wimmer <davidlwimmer@gmail.com>
Davies Liu <davies.liu@gmail.com>
Davor Kapsa <davor.kapsa@gmail.com>
Dean Eigenmann <7621705+decanus@users.noreply.github.com>
Dean Prichard <dean.prichard@gmail.com>
Deepak Jois <deepak.jois@gmail.com>
Denis Bernard <db047h@gmail.com>
@ -619,6 +652,7 @@ Dmitry Mottl <dmitry.mottl@gmail.com>
Dmitry Neverov <dmitry.neverov@gmail.com>
Dmitry Savintsev <dsavints@gmail.com>
Dmitry Yakunin <nonamezeil@gmail.com>
Doga Fincan <doga@icloud.com>
Domas Tamašauskas <puerdomus@gmail.com>
Domen Ipavec <domen@ipavec.net>
Dominic Green <dominicgreen1@gmail.com>
@ -642,6 +676,7 @@ Dustin Sallings <dsallings@gmail.com>
Dustin Shields-Cloues <dcloues@gmail.com>
Dvir Volk <dvir@everything.me> <dvirsky@gmail.com>
Dylan Waits <dylan@waits.io>
Ed Schouten <ed@nuxi.nl>
Edan Bedrik <3d4nb3@gmail.com>
Eddie Scholtz <escholtz@google.com>
Eden Li <eden.li@gmail.com>
@ -659,11 +694,13 @@ Elena Grahovac <elena@grahovac.me>
Eli Bendersky <eliben@google.com>
Elias Naur <mail@eliasnaur.com> <elias.naur@gmail.com>
Elliot Morrison-Reed <elliotmr@gmail.com>
Ellison Leão <ellisonleao@gmail.com>
Emerson Lin <linyintor@gmail.com>
Emil Hessman <emil@hessman.se>
Emil Mursalimov <mursalimovemeel@gmail.com>
Emilien Kenler <hello@emilienkenler.com>
Emmanuel Odeke <emm.odeke@gmail.com> <odeke@ualberta.ca>
Emrecan Bati <emrecanbati@gmail.com>
Eno Compton <enocom@google.com>
Eoghan Sherry <ejsherry@gmail.com>
Eric Biggers <ebiggers@google.com>
@ -682,6 +719,7 @@ Eric Rescorla <ekr@rtfm.com>
Eric Roshan-Eisner <eric.d.eisner@gmail.com>
Eric Rutherford <erutherford@gmail.com>
Eric Rykwalder <e.rykwalder@gmail.com>
Erick Tryzelaar <etryzelaar@google.com>
Erik Aigner <aigner.erik@gmail.com>
Erik Dubbelboer <erik@dubbelboer.com>
Erik St. Martin <alakriti@gmail.com>
@ -694,6 +732,7 @@ Esko Luontola <esko.luontola@gmail.com>
Ethan Burns <eaburns@google.com>
Ethan Miller <eamiller@us.ibm.com>
Euan Kemp <euank@euank.com>
Eugene Formanenko <mo4islona@gmail.com>
Eugene Kalinin <e.v.kalinin@gmail.com>
Evan Broder <evan@stripe.com>
Evan Brown <evanbrown@google.com>
@ -705,6 +744,7 @@ Evan Kroske <evankroske@google.com>
Evan Martin <evan.martin@gmail.com>
Evan Phoenix <evan@phx.io>
Evan Shaw <chickencha@gmail.com>
Evgeniy Kulikov <tuxuls@gmail.com>
Evgeniy Polyakov <zbr@ioremap.net>
Ewan Chou <coocood@gmail.com>
Ewan Valentine <ewan.valentine89@gmail.com>
@ -725,8 +765,10 @@ Fedor Indutny <fedor@indutny.com>
Fedor Korotkiy <dartslon@gmail.com>
Felipe Oliveira <felipeweb.programador@gmail.com>
Felix Bünemann <Felix.Buenemann@gmail.com>
Felix Cornelius <9767036+fcornelius@users.noreply.github.com>
Felix Geisendörfer <haimuiba@gmail.com>
Felix Kollmann <fk@konsorten.de>
Ferenc Szabo <frncmx@gmail.com>
Filip Gruszczyński <gruszczy@gmail.com>
Filip Haglund <drathier@users.noreply.github.com>
Filip Stanis <fstanis@google.com>
@ -774,6 +816,7 @@ Gary Elliott <garyelliott@google.com>
Gaurish Sharma <contact@gaurishsharma.com>
Gautham Thambidorai <gautham.dorai@gmail.com>
Gauthier Jolly <gauthier.jolly@gmail.com>
Gawen Arab <gawen.arab@c.zen.ly>
Geert-Johan Riemer <gjr19912@gmail.com>
Genevieve Luyt <genevieve.luyt@gmail.com>
Gengliang Wang <ltnwgl@gmail.com>
@ -795,6 +838,7 @@ Gianguido Sora` <g.sora4@gmail.com>
Gideon Jan-Wessel Redelinghuys <gjredelinghuys@gmail.com>
Giles Lean <giles.lean@pobox.com>
Giovanni Bajo <rasky@develer.com>
GitHub User @aca (50316549) <acadx0@gmail.com>
GitHub User @ajnirp (1688456) <ajnirp@users.noreply.github.com>
GitHub User @ajz01 (4744634) <ajzdenek@gmail.com>
GitHub User @alkesh26 (1019076) <alkesh26@gmail.com>
@ -805,12 +849,18 @@ GitHub User @bakape (7851952) <bakape@gmail.com>
GitHub User @bgadrian (830001) <aditza8@gmail.com>
GitHub User @bontequero (2674999) <bontequero@gmail.com>
GitHub User @cch123 (384546) <buaa.cch@gmail.com>
GitHub User @chainhelen (7046329) <chainhelen@gmail.com>
GitHub User @chanxuehong (3416908) <chanxuehong@gmail.com>
GitHub User @cncal (23520240) <flycalvin@qq.com>
GitHub User @DQNEO (188741) <dqneoo@gmail.com>
GitHub User @Dreamacro (8615343) <chuainian@gmail.com>
GitHub User @dupoxy (1143957) <dupoxy@users.noreply.github.com>
GitHub User @erifan (31343225) <eric.fang@arm.com>
GitHub User @esell (9735165) <eujon.sellers@gmail.com>
GitHub User @fatedier (7346661) <fatedier@gmail.com>
GitHub User @frennkie (6499251) <mail@rhab.de>
GitHub User @geedchin (11672310) <geedchin@gmail.com>
GitHub User @GrigoriyMikhalkin (3637857) <grigoriymikhalkin@gmail.com>
GitHub User @hengwu0 (41297446) <41297446+hengwu0@users.noreply.github.com>
GitHub User @itchyny (375258) <itchyny@hatena.ne.jp>
GitHub User @jinmiaoluo (39730824) <jinmiaoluo@icloud.com>
@ -820,11 +870,13 @@ GitHub User @kc1212 (1093806) <kc1212@users.noreply.github.com>
GitHub User @Kropekk (13366453) <kamilkropiewnicki@gmail.com>
GitHub User @linguohua (3434367) <lghchinaidea@gmail.com>
GitHub User @LotusFenn (13775899) <fenn.lotus@gmail.com>
GitHub User @ly303550688 (11519839) <yang.liu636@gmail.com>
GitHub User @madiganz (18340029) <zacharywmadigan@gmail.com>
GitHub User @maltalex (10195391) <code@bit48.net>
GitHub User @Matts966 (28551465) <Matts966@users.noreply.github.com>
GitHub User @micnncim (21333876) <micnncim@gmail.com>
GitHub User @mkishere (224617) <224617+mkishere@users.noreply.github.com>
GitHub User @nu50218 (40682920) <nu_ll@icloud.com>
GitHub User @OlgaVlPetrova (44112727) <OVPpetrova@gmail.com>
GitHub User @pityonline (438222) <pityonline@gmail.com>
GitHub User @po3rin (29445112) <abctail30@gmail.com>
@ -836,6 +888,7 @@ GitHub User @shogo-ma (9860598) <Choroma194@gmail.com>
GitHub User @skanehira (7888591) <sho19921005@gmail.com>
GitHub User @tatsumack (4510569) <tatsu.mack@gmail.com>
GitHub User @tell-k (26263) <ffk2005@gmail.com>
GitHub User @tennashi (10219626) <tennashio@gmail.com>
GitHub User @uhei (2116845) <uhei@users.noreply.github.com>
GitHub User @uropek (39370426) <uropek@gmail.com>
GitHub User @utkarsh-extc (53217283) <utkarsh.extc@gmail.com>
@ -861,6 +914,7 @@ Greg Thelen <gthelen@google.com>
Greg Ward <greg@gerg.ca>
Grégoire Delattre <gregoire.delattre@gmail.com>
Gregory Man <man.gregory@gmail.com>
Gregory Petrosyan <gregory.petrosyan@gmail.com>
Guilherme Caruso <gui.martinscaruso@gmail.com>
Guilherme Garnier <guilherme.garnier@gmail.com>
Guilherme Goncalves <guilhermeaugustosg@gmail.com>
@ -917,6 +971,7 @@ Hitoshi Mitake <mitake.hitoshi@gmail.com>
Holden Huang <ttyh061@gmail.com>
Hong Ruiqi <hongruiqi@gmail.com>
Hongfei Tan <feilengcui008@gmail.com>
Horacio Duran <horacio.duran@gmail.com>
Horst Rutter <hhrutter@gmail.com>
Hossein Sheikh Attar <hattar@google.com>
Howard Zhang <howard.zhang@arm.com>
@ -927,6 +982,7 @@ Huan Du <i@huandu.me>
Hugues Bruant <hugues.bruant@gmail.com>
Huy Le <huy.dinh.le.89@gmail.com>
Hyang-Ah Hana Kim <hakim@google.com> <hyangah@gmail.com>
Hyoyoung Chang <hyoyoung@gmail.com>
Ian Cottrell <iancottrell@google.com>
Ian Davis <nospam@iandavis.com>
Ian Gudger <ian@loosescre.ws>
@ -986,6 +1042,7 @@ Jake B <doogie1012@gmail.com>
Jakob Borg <jakob@nym.se>
Jakob Weisblat <jakobw@mit.edu>
Jakub Čajka <jcajka@redhat.com>
Jakub Kaczmarzyk <jakubk@mit.edu>
Jakub Ryszard Czarnowicz <j.czarnowicz@gmail.com>
Jamal Carvalho <jamal.a.carvalho@gmail.com>
James Aguilar <jaguilar@google.com>
@ -1032,6 +1089,7 @@ Jan Steinke <jan.steinke@gmail.com>
Jan Ziak <0xe2.0x9a.0x9b@gmail.com>
Jani Monoses <jani.monoses@ubuntu.com> <jani.monoses@gmail.com>
Jannis Andrija Schnitzer <jannis@schnitzer.im>
Jared Allard <jaredallard@users.noreply.github.com>
Jared Culp <jculp14@gmail.com>
Jaroslavas Počepko <jp@webmaster.ms>
Jason A. Donenfeld <Jason@zx2c4.com>
@ -1086,8 +1144,11 @@ Jerrin Shaji George <jerrinsg@gmail.com>
Jess Frazelle <me@jessfraz.com>
Jesse Szwedko <jesse.szwedko@gmail.com>
Jesús Espino <jespinog@gmail.com>
Jia Zhan <jzhan@uber.com>
Jiacai Liu <jiacai2050@gmail.com>
Jianing Yu <jnyu@google.com>
Jianqiao Li <jianqiaoli@google.com>
Jie Ma <jienius@outlook.com>
Jihyun Yu <yjh0502@gmail.com>
Jim Cote <jfcote87@gmail.com>
Jim Kingdon <jim@bolt.me>
@ -1135,6 +1196,7 @@ John Howard Palevich <jack.palevich@gmail.com>
John Jeffery <jjeffery@sp.com.au>
John Jenkins <twodopeshaggy@gmail.com>
John Leidegren <john.leidegren@gmail.com>
John McCabe <john@johnmccabe.net>
John Moore <johnkenneth.moore@gmail.com>
John Newlin <jnewlin@google.com>
John Papandriopoulos <jpap.code@gmail.com>
@ -1146,6 +1208,7 @@ John Tuley <john@tuley.org>
John Weldon <johnweldon4@gmail.com>
Johnny Luo <johnnyluo1980@gmail.com>
Jon Chen <jchen@justin.tv>
Jon Johnson <jonjohnson@google.com>
Jonas Bernoulli <jonas@bernoul.li>
Jonathan Allie <jonallie@google.com>
Jonathan Amsterdam <jba@google.com>
@ -1165,6 +1228,7 @@ Jonathon Lacher <jonathon.lacher@gmail.com>
Jongmin Kim <atomaths@gmail.com>
Joonas Kuorilehto <joneskoo@derbian.fi>
Joop Kiefte <ikojba@gmail.com> <joop@kiefte.net>
Jordan Christiansen <xordspar0@gmail.com>
Jordan Krage <jmank88@gmail.com>
Jordan Lewis <jordanthelewis@gmail.com>
Jordan Liggitt <liggitt@google.com>
@ -1177,6 +1241,7 @@ Josa Gesell <josa@gesell.me>
Jose Luis Vázquez González <josvazg@gmail.com>
Joseph Bonneau <jcb@google.com>
Joseph Holsten <joseph@josephholsten.com>
Josh Baum <joshbaum@google.com>
Josh Bleecher Snyder <josharian@gmail.com>
Josh Chorlton <jchorlton@gmail.com>
Josh Deprez <josh.deprez@gmail.com>
@ -1185,8 +1250,10 @@ Josh Hoak <jhoak@google.com>
Josh Holland <jrh@joshh.co.uk>
Josh Roppo <joshroppo@gmail.com>
Josh Varga <josh.varga@gmail.com>
Joshua Bezaleel Abednego <joshua.bezaleel@gmail.com>
Joshua Boelter <joshua.boelter@intel.com>
Joshua Chase <jcjoshuachase@gmail.com>
Joshua Crowgey <jcrowgey@uw.edu>
Joshua M. Clulow <josh.clulow@joyent.com>
Joshua Rubin <joshua@rubixconsulting.com>
Josselin Costanzi <josselin@costanzi.fr>
@ -1265,6 +1332,7 @@ Kenji Yano <kenji.yano@gmail.com>
Kenneth Shaw <kenshaw@gmail.com>
Kenny Grant <kennygrant@gmail.com>
Kenta Mori <zoncoen@gmail.com>
Kerollos Magdy <kerolloz@yahoo.com>
Ketan Parmar <ketanbparmar@gmail.com>
Kevan Swanberg <kevswanberg@gmail.com>
Kevin Ballard <kevin@sb.org>
@ -1277,10 +1345,14 @@ Kevin Malachowski <chowski@google.com>
Kevin Ruffin <kruffin@gmail.com>
Kevin Vu <kevin.m.vu@gmail.com>
Kevin Zita <bleedgreenandgold@gmail.com>
Keyan Pishdadian <kpishdadian@gmail.com>
Kezhu Wang <kezhuw@gmail.com>
Khosrow Moossavi <khos2ow@gmail.com>
Kieran Colford <kieran@kcolford.com>
Kim Shrier <kshrier@racktopsystems.com>
Kim Yongbin <kybinz@gmail.com>
Kir Kolyshkin <kolyshkin@gmail.com>
Kirill Korotaev <kirillx@gmail.com>
Kirill Motkov <Motkov.Kirill@gmail.com>
Kirill Smelkov <kirr@nexedi.com>
Kirill Tatchihin <kirabsuir@gmail.com>
@ -1308,6 +1380,7 @@ Kyle Consalus <consalus@gmail.com>
Kyle Isom <kyle@gokyle.net>
Kyle Jones <kyle@kyledj.com>
Kyle Lemons <kyle@kylelemons.net> <kevlar@google.com>
Kyle Nusbaum <kyle@datadog.com>
Kyle Shannon <kyle@pobox.com>
Kyle Spiers <eiais@google.com>
Kyle Wood <kyle@kylewood.cc>
@ -1339,6 +1412,8 @@ Leonardo Comelli <leonardo.comelli@gmail.com>
Leonel Quinteros <leonel.quinteros@gmail.com>
Lev Shamardin <shamardin@gmail.com>
Lewin Bormann <lewin.bormann@gmail.com>
Liam Haworth <liam@haworth.id.au>
Lily Chung <lilithkchung@gmail.com>
Lion Yang <lion@aosc.xyz>
Liz Rice <liz@lizrice.com>
Lloyd Dewolf <foolswisdom@gmail.com>
@ -1396,6 +1471,7 @@ Marcel van Lohuizen <mpvl@golang.org>
Marcelo Cantos <marcelo.cantos@gmail.com>
Marcelo E. Magallon <marcelo.magallon@gmail.com>
Marco Hennings <marco.hennings@freiheit.com>
Marcus Weiner <marcus.weiner@gmail.com>
Marcus Willock <crazcalm@gmail.com>
Marga Manterola <marga@google.com>
Mariano Cano <mariano@smallstep.com>
@ -1426,6 +1502,7 @@ Markus Duft <markus.duft@salomon.at>
Markus Sonderegger <marraison@gmail.com>
Markus Zimmermann <zimmski@gmail.com>
Marten Seemann <martenseemann@gmail.com>
Martin Asquino <martin.asquino@gmail.com>
Martin Bertschler <mbertschler@gmail.com>
Martin Garton <garton@gmail.com>
Martin Habbecke <marhab@google.com>
@ -1449,6 +1526,7 @@ Maryan Hratson <gmarik@gmail.com>
Masahiro Furudate <masahiro.furudate@gmail.com>
Masahiro Wakame <vvakame@gmail.com>
Masaki Yoshida <yoshida.masaki@gmail.com>
Masaya Watanabe <sfbgwm30@gmail.com>
Mat Byczkowski <mbyczkowski@gmail.com>
Mat Ryer <thatmatryer@gmail.com>
Máté Gulyás <mgulyas86@gmail.com>
@ -1495,6 +1573,7 @@ Max Ushakov <ushmax@gmail.com>
Maxim Eryomenko <moeryomenko@gmail.com>
Maxim Khitrov <max@mxcrypt.com>
Maxim Pimenov <mpimenov@google.com>
Maxim Pugachev <pugachev.mm@gmail.com>
Maxim Ushakov <ushakov@google.com>
Maxime de Roucy <maxime.deroucy@gmail.com>
Máximo Cuadros Ortiz <mcuadros@gmail.com>
@ -1549,6 +1628,7 @@ Michal Bohuslávek <mbohuslavek@gmail.com>
Michal Cierniak <cierniak@google.com>
Michał Derkacz <ziutek@lnet.pl>
Michal Franc <lam.michal.franc@gmail.com>
Michał Łowicki <mlowicki@gmail.com>
Michal Pristas <michal.pristas@gmail.com>
Michal Rostecki <mrostecki@suse.de>
Michalis Kargakis <michaliskargakis@gmail.com>
@ -1556,6 +1636,7 @@ Michel Lespinasse <walken@google.com>
Mickael Kerjean <mickael.kerjean@gmail.com>
Mickey Reiss <mickeyreiss@gmail.com>
Miek Gieben <miek@miek.nl> <remigius.gieben@gmail.com>
Miguel Acero <acero@google.com>
Miguel Mendez <stxmendez@gmail.com>
Miguel Molina <hi@mvader.me>
Mihai Borobocea <MihaiBorobocea@gmail.com>
@ -1582,6 +1663,7 @@ Mikio Hara <mikioh.mikioh@gmail.com>
Mikkel Krautz <mikkel@krautz.dk> <krautz@gmail.com>
Mikołaj Baranowski <mikolajb@gmail.com>
Milan Knezevic <milan.knezevic@mips.com>
Milan Patel <bicelot3@gmail.com>
Milutin Jovanović <jovanovic.milutin@gmail.com>
MinJae Kwon <mingrammer@gmail.com>
Miquel Sabaté Solà <mikisabate@gmail.com>
@ -1603,8 +1685,10 @@ Mrunal Patel <mrunalp@gmail.com>
Muhammad Falak R Wani <falakreyaz@gmail.com>
Muhammed Uluyol <uluyol0@gmail.com>
Muir Manders <muir@mnd.rs>
Mukesh Sharma <sharma.mukesh439@gmail.com>
Mura Li <mura_li@castech.com.tw>
Mykhailo Lesyk <mikhail@lesyk.org>
Naman Aggarwal <aggarwal.nam@gmail.com>
Nan Deng <monnand@gmail.com>
Nao Yonashiro <owan.orisano@gmail.com>
Naoki Kanatani <k12naoki@gmail.com>
@ -1612,6 +1696,7 @@ Nate Wilkinson <nathanwilk7@gmail.com>
Nathan Cantelmo <n.cantelmo@gmail.com>
Nathan Caza <mastercactapus@gmail.com>
Nathan Dias <nathan.dias@orijtech.com>
Nathan Fiscaletti <nathan.fiscaletti@vrazo.com>
Nathan Humphreys <nkhumphreys@gmail.com>
Nathan John Youngman <nj@nathany.com>
Nathan Otterness <otternes@cs.unc.edu>
@ -1621,6 +1706,7 @@ Nathan Youngman <git@nathany.com>
Nathan(yinian) Hu <nathanhu@google.com>
Nathaniel Cook <nvcook42@gmail.com>
Naveen Kumar Sangi <naveenkumarsangi@protonmail.com>
Neeilan Selvalingam <neeilan96@gmail.com>
Neelesh Chandola <neelesh.c98@gmail.com>
Neil Lyons <nwjlyons@googlemail.com>
Neuman Vong <neuman.vong@gmail.com>
@ -1661,17 +1747,20 @@ Nikita Vanyasin <nikita.vanyasin@gmail.com>
Niklas Schnelle <niklas.schnelle@gmail.com>
Niko Dziemba <niko@dziemba.com>
Nikolay Turpitko <nikolay@turpitko.com>
Nikson Kanti Paul <nikson.sust@gmail.com>
Nils Larsgård <nilsmagnus@gmail.com>
Nir Soffer <nirsof@gmail.com>
Niranjan Godbole <niranjan8192@gmail.com>
Nishanth Shanmugham <nishanth.gerrard@gmail.com>
Noah Campbell <noahcampbell@gmail.com>
Noah Goldman <noahg34@gmail.com>
Noble Johnson <noblepoly@gmail.com>
Nodir Turakulov <nodir@google.com>
Noel Georgi <git@frezbo.com>
Norberto Lopes <nlopes.ml@gmail.com>
Norman B. Lancaster <qbradq@gmail.com>
Nuno Cruces <ncruces@users.noreply.github.com>
Obeyda Djeffal <djefobey@gmail.com>
Odin Ugedal <odin@ugedal.com>
Oleg Bulatov <dmage@yandex-team.ru>
Oleg Vakheta <helginet@gmail.com>
@ -1689,6 +1778,7 @@ Omar Jarjur <ojarjur@google.com>
Oryan Moshe <iamoryanmoshe@gmail.com>
Osamu TONOMORI <osamingo@gmail.com>
Özgür Kesim <oec-go@kesim.org>
Pablo Caderno <kaderno@gmail.com>
Pablo Lalloni <plalloni@gmail.com>
Pablo Rozas Larraondo <pablo.larraondo@anu.edu.au>
Pablo Santiago Blum de Aguiar <scorphus@gmail.com>
@ -1702,6 +1792,8 @@ Parker Moore <parkrmoore@gmail.com>
Parminder Singh <parmsingh101@gmail.com>
Pascal Dierich <pascal@pascaldierich.com>
Pascal S. de Kloe <pascal@quies.net>
Paschalis Tsilias <paschalis.tsilias@gmail.com>
Pasi Tähkäpää <pasi.tahkapaa@gmail.com>
Pat Moroney <pat@pat.email>
Patrick Barker <barkerp@vmware.com>
Patrick Crosby <patrick@stathat.com>
@ -1718,6 +1810,7 @@ Paul A Querna <paul.querna@gmail.com>
Paul Borman <borman@google.com>
Paul Boyd <boyd.paul2@gmail.com>
Paul Chang <paulchang@google.com>
Paul D. Weber <x0bdev@gmail.com>
Paul Hammond <paul@paulhammond.org>
Paul Hankin <paulhankin@google.com>
Paul Jolly <paul@myitcv.org.uk>
@ -1743,8 +1836,10 @@ Pavel Zinovkin <pavel.zinovkin@gmail.com>
Pavlo Sumkin <ymkins@gmail.com>
Pawel Knap <pawelknap88@gmail.com>
Pawel Szczur <filemon@google.com>
Pei Xian Chee <luciolas1991@gmail.com>
Percy Wegmann <ox.to.a.cart@gmail.com>
Perry Abbott <perry.j.abbott@gmail.com>
Petar Dambovaliev <petar.atanasov.1987@gmail.com>
Petar Maymounkov <petarm@gmail.com>
Peter Armitage <peter.armitage@gmail.com>
Peter Bourgon <peter@bourgon.org>
@ -1781,6 +1876,7 @@ Philip Hofer <phofer@umich.edu>
Philip K. Warren <pkwarren@gmail.com>
Philip Nelson <me@pnelson.ca>
Philipp Stephani <phst@google.com>
Pierre Carru <pierre.carru@eshard.com>
Pierre Durand <pierredurand@gmail.com>
Pierre Prinetti <pierreprinetti@gmail.com>
Pierre Roullon <pierre.roullon@gmail.com>
@ -1789,11 +1885,14 @@ Pieter Droogendijk <pieter@binky.org.uk>
Pietro Gagliardi <pietro10@mac.com>
Piyush Mishra <piyush@codeitout.com>
Plekhanov Maxim <kishtatix@gmail.com>
Polina Osadcha <polliosa@google.com>
Pontus Leitzler <leitzler@gmail.com>
Povilas Versockas <p.versockas@gmail.com>
Prasanga Siripala <pj@pjebs.com.au>
Prasanna Swaminathan <prasanna@mediamath.com>
Prashant Agrawal <prashant.a.vjti@gmail.com>
Prashant Varanasi <prashant@prashantv.com>
Praveen Kumar <praveen+git@kumar.in>
Pravendra Singh <hackpravj@gmail.com>
Preetam Jinka <pj@preet.am>
Pure White <wu.purewhite@gmail.com>
@ -1804,6 +1903,7 @@ Quan Yong Zhai <qyzhai@gmail.com>
Quentin Perez <qperez@ocs.online.net>
Quentin Renard <contact@asticode.com>
Quentin Smith <quentin@golang.org>
Quey-Liang Kao <s101062801@m101.nthu.edu.tw>
Quinn Slack <sqs@sourcegraph.com>
Quinten Yearsley <qyearsley@chromium.org>
Quoc-Viet Nguyen <afelion@gmail.com>
@ -1831,6 +1931,7 @@ Reilly Watson <reillywatson@gmail.com>
Reinaldo de Souza Jr <juniorz@gmail.com>
Remi Gillig <remigillig@gmail.com>
Rémy Oudompheng <oudomphe@phare.normalesup.org> <remyoudompheng@gmail.com>
Ren Ogaki <re.yuz77777@gmail.com>
Rens Rikkerink <Ikkerens@users.noreply.github.com>
Rhys Hiltner <rhys@justin.tv>
Ricardo Padilha <ricardospadilha@gmail.com>
@ -1842,6 +1943,8 @@ Richard Eric Gavaletz <gavaletz@gmail.com>
Richard Gibson <richard.gibson@gmail.com>
Richard Miller <miller.research@gmail.com>
Richard Musiol <mail@richard-musiol.de> <neelance@gmail.com>
Richard Ulmer <codesoap@mailbox.org>
Richard Wilkes <wilkes@me.com>
Rick Arnold <rickarnoldjr@gmail.com>
Rick Hudson <rlh@golang.org>
Rick Sayre <whorfin@gmail.com>
@ -1860,6 +1963,7 @@ Robert Figueiredo <robfig@gmail.com>
Robert Griesemer <gri@golang.org>
Robert Hencke <robert.hencke@gmail.com>
Robert Iannucci <iannucci@google.com>
Robert Kuska <rkuska@gmail.com>
Robert Obryk <robryk@gmail.com>
Robert Sesek <rsesek@google.com>
Robert Snedegar <roberts@google.com>
@ -1878,6 +1982,7 @@ Roger Pau Monné <royger@gmail.com>
Roger Peppe <rogpeppe@gmail.com>
Rohan Challa <rohan@golang.org>
Rohan Verma <rohanverma2004@gmail.com>
Rohith Ravi <entombedvirus@gmail.com>
Roland Illig <roland.illig@gmx.de>
Roland Shoemaker <rolandshoemaker@gmail.com>
Romain Baugue <romain.baugue@elwinar.com>
@ -1887,6 +1992,7 @@ Roman Shchekin <mrqtros@gmail.com>
Ron Hashimoto <mail@h2so5.net>
Ron Minnich <rminnich@gmail.com>
Ross Chater <rdchater@gmail.com>
Ross Kinsey <rossikinsey@gmail.com>
Ross Light <light@google.com> <rlight2@gmail.com>
Ross Smith II <ross@smithii.com>
Rowan Marshall <rowanajmarshall@gmail.com>
@ -1921,6 +2027,8 @@ Sakeven Jiang <jc5930@sina.cn>
Salmān Aljammāz <s@0x65.net>
Sam Arnold <sarnold64@bloomberg.net>
Sam Boyer <tech@samboyer.org>
Sam Chen <chenxsan@gmail.com>
Sam Cross <samgcdev@gmail.com>
Sam Ding <samding@ca.ibm.com>
Sam Hug <samuel.b.hug@gmail.com>
Sam Thorogood <thorogood@google.com> <sam.thorogood@gmail.com>
@ -1972,6 +2080,7 @@ Sergey 'SnakE' Gromov <snake.scaly@gmail.com>
Sergey Arseev <sergey.arseev@intel.com>
Sergey Dobrodey <sergey.dobrodey@synesis.ru>
Sergey Frolov <sfrolov@google.com>
Sergey Glushchenko <gsserge@gmail.com>
Sergey Ivanov <ser1325@gmail.com>
Sergey Lukjanov <me@slukjanov.name>
Sergey Mishin <sergeymishine@gmail.com>
@ -1987,7 +2096,9 @@ Seth Vargo <sethvargo@gmail.com>
Shahar Kohanim <skohanim@gmail.com>
Shamil Garatuev <garatuev@gmail.com>
Shane Hansen <shanemhansen@gmail.com>
Shang Jian Ding <sding3@ncsu.edu>
Shaozhen Ding <dsz0111@gmail.com>
Shaquille Wyan Que <shaqqywyan@gmail.com>
Shaun Dunning <shaun.dunning@uservoice.com>
Shawn Elliott <selliott@microsoft.com>
Shawn Ledbetter <sledbetter@google.com>
@ -2008,6 +2119,7 @@ Shubham Sharma <shubham.sha12@gmail.com>
Shun Fan <sfan@google.com>
Silvan Jegen <s.jegen@gmail.com>
Simarpreet Singh <simar@linux.com>
Simon Drake <simondrake1990@gmail.com>
Simon Ferquel <simon.ferquel@docker.com>
Simon Jefford <simon.jefford@gmail.com>
Simon Rawet <simon@rawet.se>
@ -2018,6 +2130,8 @@ Sina Siadat <siadat@gmail.com>
Sjoerd Siebinga <sjoerd.siebinga@gmail.com>
Sokolov Yura <funny.falcon@gmail.com>
Song Gao <song@gao.io>
Soojin Nam <jsunam@gmail.com>
Søren L. Hansen <soren@linux2go.dk>
Spencer Kocot <spencerkocot@gmail.com>
Spencer Nelson <s@spenczar.com>
Spencer Tung <spencertung@google.com>
@ -2074,6 +2188,7 @@ Taavi Kivisik <taavi.kivisik@gmail.com>
Tad Fisher <tadfisher@gmail.com>
Tad Glines <tad.glines@gmail.com>
Tadas Valiukas <tadovas@gmail.com>
Tadeo Kondrak <me@tadeo.ca>
Taesu Pyo <pyotaesu@gmail.com>
Tai Le <letientai299@gmail.com>
Taj Khattra <taj.khattra@gmail.com>
@ -2083,6 +2198,7 @@ Takeshi YAMANASHI <9.nashi@gmail.com>
Takuto Ikuta <tikuta@google.com>
Takuya Ueda <uedatakuya@gmail.com>
Tal Shprecher <tshprecher@gmail.com>
Tamás Gulácsi <tgulacsi78@gmail.com>
Tamir Duberstein <tamird@gmail.com>
Tao Qingyun <qingyunha@gmail.com>
Tao Shen <shentaoskyking@gmail.com>
@ -2102,6 +2218,7 @@ Tetsuo Kiso <tetsuokiso9@gmail.com>
Than McIntosh <thanm@google.com>
Thanabodee Charoenpiriyakij <wingyminus@gmail.com>
Thanatat Tamtan <acoshift@gmail.com>
The Hatsune Daishi <nao20010128@gmail.com>
Thiago Avelino <t@avelino.xxx>
Thiago Fransosi Farina <thiago.farina@gmail.com> <tfarina@chromium.org>
Thomas Alan Copeland <talan.copeland@gmail.com>
@ -2128,9 +2245,11 @@ Tim Ebringer <tim.ebringer@gmail.com>
Tim Heckman <t@heckman.io>
Tim Henderson <tim.tadh@gmail.com>
Tim Hockin <thockin@google.com>
Tim Möhlmann <muhlemmer@gmail.com>
Tim Swast <swast@google.com>
Tim Wright <tenortim@gmail.com>
Tim Xu <xiaoxubeii@gmail.com>
Timmy Douglas <timmyd983@gmail.com>
Timo Savola <timo.savola@gmail.com>
Timo Truyts <alkaloid.btx@gmail.com>
Timothy Studd <tim@timstudd.com>
@ -2149,6 +2268,7 @@ Tom Lanyon <tomlanyon@google.com>
Tom Levy <tomlevy93@gmail.com>
Tom Limoncelli <tal@whatexit.org>
Tom Linford <tomlinford@gmail.com>
Tom Parkin <tom.parkin@gmail.com>
Tom Payne <twpayne@gmail.com>
Tom Szymanski <tgs@google.com>
Tom Thorogood <me+google@tomthorogood.co.uk>
@ -2162,6 +2282,7 @@ Tony Reix <tony.reix@bull.net>
Tony Walker <walkert.uk@gmail.com>
Tooru Takahashi <tooru.takahashi134@gmail.com>
Tor Andersson <tor.andersson@gmail.com>
Torben Schinke <torben.schinke@neotos.de>
Tormod Erevik Lea <tormodlea@gmail.com>
Toshihiro Shiino <shiino.toshihiro@gmail.com>
Toshiki Shima <hayabusa1419@gmail.com>
@ -2178,12 +2299,15 @@ Tristan Ooohry <ooohry@gmail.com>
Tristan Rice <rice@fn.lc>
Troels Thomsen <troels@thomsen.io>
Trung Nguyen <trung.n.k@gmail.com>
Tsuji Daishiro <dram.dt.shonan@gmail.com>
Tudor Golubenco <tudor.g@gmail.com>
Tugdual Saunier <tugdual.saunier@gmail.com>
Tuo Shan <sturbo89@gmail.com> <shantuo@google.com>
Tyler Bui-Palsulich <tpalsulich@google.com>
Tyler Bunnell <tylerbunnell@gmail.com>
Tyler Treat <ttreat31@gmail.com>
Tyson Andre <tysonandre775@gmail.com>
Tzach Shabtay <tzachshabtay@gmail.com>
Tzu-Jung Lee <roylee17@currant.com>
Udalov Max <re.udalov@gmail.com>
Ugorji Nwoke <ugorji@gmail.com>
@ -2217,6 +2341,7 @@ Visweswara R <r.visweswara@gmail.com>
Vitaly Zdanevich <zdanevich.vitaly@ya.ru>
Vitor De Mario <vitordemario@gmail.com>
Vivek Sekhar <vsekhar@google.com>
Vivian Liang <vliang88@gmail.com>
Vlad Krasnov <vlad@cloudflare.com>
Vladimir Evgrafov <evgrafov.vladimir@gmail.com>
Vladimir Kovpak <cn007b@gmail.com>
@ -2231,6 +2356,7 @@ Volodymyr Paprotski <vpaprots@ca.ibm.com>
W. Trevor King <wking@tremily.us>
Wade Simmons <wade@wades.im>
Wagner Riffel <wgrriffel@gmail.com>
Walt Della <walt@javins.net>
Walter Poupore <wpoupore@google.com>
Wander Lairson Costa <wcosta@mozilla.com>
Wang Xuerui <git@xen0n.name>
@ -2274,12 +2400,15 @@ Xudong Zheng <7pkvm5aw@slicealias.com>
Xuyang Kang <xuyangkang@gmail.com>
Yamagishi Kazutoshi <ykzts@desire.sh>
Yan Zou <yzou@google.com>
Yang Hau <vulxj0j8j8@gmail.com>
Yang Tian <linuxty@gmail.com>
Yann Hodique <yhodique@google.com>
Yann Kerhervé <yann.kerherve@gmail.com>
Yann Salaün <yannsalaun1@gmail.com>
Yannic Bonenberger <contact@yannic-bonenberger.com>
Yao Zhang <lunaria21@gmail.com>
Yaron de Leeuw <jarondl@google.com>
Yaroslav Vorobiov <yar.vorobiov@gmail.com>
Yasha Bubnov <girokompass@gmail.com>
Yasser Abdolmaleki <yasser@yasser.ca>
Yasuharu Goto <matope.ono@gmail.com>
@ -2298,6 +2427,7 @@ Yoshiyuki Mineo <yoshiyuki.mineo@gmail.com>
Yosuke Akatsuka <yosuke.akatsuka@gmail.com>
Yu Heng Zhang <annita.zhang@cn.ibm.com>
Yu Xuan Zhang <zyxsh@cn.ibm.com>
Yuichi Kishimoto <yk2220s@gmail.com>
Yuichi Nishiwaki <yuichi.nishiwaki@gmail.com>
Yuji Yaginuma <yuuji.yaginuma@gmail.com>
Yuki OKUSHI <huyuumi.dev@gmail.com>
@ -2318,6 +2448,7 @@ Zak <zrjknill@gmail.com>
Zakatell Kanda <hi@zkanda.io>
Zellyn Hunter <zellyn@squareup.com> <zellyn@gmail.com>
Zev Goldstein <zev.goldstein@gmail.com>
Zhang Boyang <zhangboyang.id@gmail.com>
Zheng Dayu <davidzheng23@gmail.com>
Zheng Xu <zheng.xu@arm.com>
Zhengyu He <hzy@google.com>


@ -456,3 +456,4 @@ pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntonname [88]int8
pkg text/scanner, const GoTokens = 1012
pkg unicode, const Version = "10.0.0"
pkg unicode, const Version = "11.0.0"
pkg unicode, const Version = "12.0.0"


@ -112,8 +112,6 @@ pkg debug/pe, const IMAGE_SUBSYSTEM_WINDOWS_GUI = 2
pkg debug/pe, const IMAGE_SUBSYSTEM_WINDOWS_GUI ideal-int
pkg debug/pe, const IMAGE_SUBSYSTEM_XBOX = 14
pkg debug/pe, const IMAGE_SUBSYSTEM_XBOX ideal-int
pkg go/printer, const StdFormat = 16
pkg go/printer, const StdFormat Mode
pkg math/big, method (*Int) FillBytes([]uint8) []uint8
pkg net, method (*Resolver) LookupIP(context.Context, string, string) ([]IP, error)
pkg net/url, method (*URL) EscapedFragment() string


@ -0,0 +1,19 @@
pkg unicode, const Version = "13.0.0"
pkg unicode, var Chorasmian *RangeTable
pkg unicode, var Dives_Akuru *RangeTable
pkg unicode, var Khitan_Small_Script *RangeTable
pkg unicode, var Yezidi *RangeTable
pkg text/template/parse, const NodeComment = 20
pkg text/template/parse, const NodeComment NodeType
pkg text/template/parse, const ParseComments = 1
pkg text/template/parse, const ParseComments Mode
pkg text/template/parse, method (*CommentNode) Copy() Node
pkg text/template/parse, method (*CommentNode) String() string
pkg text/template/parse, method (CommentNode) Position() Pos
pkg text/template/parse, method (CommentNode) Type() NodeType
pkg text/template/parse, type CommentNode struct
pkg text/template/parse, type CommentNode struct, Text string
pkg text/template/parse, type CommentNode struct, embedded NodeType
pkg text/template/parse, type CommentNode struct, embedded Pos
pkg text/template/parse, type Mode uint
pkg text/template/parse, type Tree struct, Mode Mode


@ -257,6 +257,7 @@ To use the <code>net/http</code> package, it must be imported:
import (
"fmt"
"io/ioutil"
"log"
<b>"net/http"</b>
)
</pre>


@ -687,6 +687,13 @@ MOVQ g(CX), AX // Move g into AX.
MOVQ g_m(AX), BX // Move g.m into BX.
</pre>
<p>
Register <code>BP</code> is callee-save.
The assembler automatically inserts <code>BP</code> save/restore when the frame size is larger than zero.
Using <code>BP</code> as a general-purpose register is allowed;
however, it can interfere with sampling-based profiling.
</p>
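<p>
As an illustrative sketch (this function is not from the document), a
frame size of 16 is enough to trigger the automatic save/restore, so the
body never has to touch <code>BP</code> itself:
</p>
<pre>
// func add(a, b int64) int64
// The assembler saves and restores BP around this body because the
// frame size ($16) is larger than zero.
TEXT ·add(SB), $16-24
	MOVQ	a+0(FP), AX
	MOVQ	b+8(FP), BX
	ADDQ	BX, AX
	MOVQ	AX, ret+16(FP)
	RET
</pre>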
<h3 id="arm">ARM</h3>
<p>


@ -45,8 +45,8 @@ CLA (Contributor License Agreement).
</li>
<li>
<b>Step 2</b>: Configure authentication credentials for the Go Git repository.
Visit <a href="https://go.googlesource.com/">go.googlesource.com</a>, click
on the gear icon (top right), then on "Obtain password", and follow the
Visit <a href="https://go.googlesource.com">go.googlesource.com</a>, click
"Generate Password" in the page's top right menu bar, and follow the
instructions.
</li>
<li>


@ -609,6 +609,12 @@ Do not send CLs removing the interior tags from such phrases.
If a program needs to accept invalid numbers like the empty string,
consider wrapping the type with <a href="/pkg/encoding/json/#Unmarshaler"><code>Unmarshaler</code></a>.
</p>
<p><!-- CL 200237 -->
<a href="/pkg/encoding/json/#Unmarshal"><code>Unmarshal</code></a>
now supports map keys whose underlying type is string and that implement
<a href="/pkg/encoding/#TextUnmarshaler"><code>encoding.TextUnmarshaler</code></a>.
</p>
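<p>
A sketch of the new behavior (the <code>Key</code> type is illustrative,
not part of the standard library):
</p>
<pre>
// Key has underlying type string and implements encoding.TextUnmarshaler.
// Requires "encoding/json" and "strings".
type Key string

func (k *Key) UnmarshalText(text []byte) error {
	*k = Key(strings.ToLower(string(text)))
	return nil
}

var m map[Key]int
err := json.Unmarshal([]byte(`{"HELLO": 1}`), &m)
// err is nil and m is map[hello:1]: the key was canonicalized by
// UnmarshalText instead of being used verbatim.
</pre>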
</dd>
</dl><!-- encoding/json -->


@ -14,13 +14,21 @@ Do not send CLs removing the interior tags from such phrases.
main ul li { margin: 0.5em 0; }
</style>
<h2 id="introduction">DRAFT RELEASE NOTES — Introduction to Go 1.15</h2>
<h2 id="introduction">Introduction to Go 1.15</h2>
<p>
<strong>
Go 1.15 is not yet released. These are work-in-progress
release notes. Go 1.15 is expected to be released in August 2020.
</strong>
The latest Go release, version 1.15, arrives six months after <a href="go1.14">Go 1.14</a>.
Most of its changes are in the implementation of the toolchain, runtime, and libraries.
As always, the release maintains the Go 1 <a href="/doc/go1compat.html">promise of compatibility</a>.
We expect almost all Go programs to continue to compile and run as before.
</p>
<p>
Go 1.15 includes <a href="#linker">substantial improvements to the linker</a>,
improves <a href="#runtime">allocation for small objects at high core counts</a>, and
deprecates <a href="#commonname">X.509 CommonName</a>.
<code>GOPROXY</code> now supports skipping proxies that return errors and
a new <a href="#time/tzdata">embedded tzdata package</a> has been added.
</p>
<h2 id="language">Changes to the language</h2>
@ -94,6 +102,16 @@ Do not send CLs removing the interior tags from such phrases.
preemption.
</p>
<h3 id="386">386</h3>
<p><!-- golang.org/issue/40255 -->
Go 1.15 is the last release to support x87-only floating-point
hardware (<code>GO386=387</code>). Future releases will require at
least SSE2 support on 386, raising Go's
minimum <code>GOARCH=386</code> requirement to the Intel Pentium 4
(released in 2000) or AMD Opteron/Athlon 64 (released in 2003).
</p>
<h2 id="tools">Tools</h2>
<h3 id="go-command">Go command</h3>
@ -336,8 +354,13 @@ Do not send CLs removing the interior tags from such phrases.
</p>
<p><!-- CL 207877 -->
TODO: <a href="https://golang.org/cl/207877">https://golang.org/cl/207877</a>: Revert -buildmode=pie to internal linking.
The linker defaults to internal linking mode for PIE on linux/amd64 and linux/arm64, which does require a C linker.
The linker now defaults to internal linking mode
for <code>-buildmode=pie</code> on
<code>linux/amd64</code> and <code>linux/arm64</code>, so these
configurations no longer require a C linker. External linking
mode (which was the default in Go 1.14 for
<code>-buildmode=pie</code>) can still be requested with
the <code>-ldflags=-linkmode=external</code> flag.
</p>
<h2 id="objdump">Objdump</h2>
@ -374,6 +397,23 @@ Do not send CLs removing the interior tags from such phrases.
documentation</a> for more information.
</p>
<h3 id="commonname">X.509 CommonName deprecation</h3>
<p><!-- CL 231379 -->
The deprecated, legacy behavior of treating the <code>CommonName</code>
field on X.509 certificates as a host name when no Subject Alternative Names
are present is now disabled by default. It can be temporarily re-enabled by
adding the value <code>x509ignoreCN=0</code> to the <code>GODEBUG</code>
environment variable.
</p>
<p>
Note that if the <code>CommonName</code> is an invalid host name, it's always
ignored, regardless of <code>GODEBUG</code> settings. Invalid names include
those with any characters other than letters, digits, hyphens and underscores,
and those with empty labels or trailing dots.
</p>
<h3 id="minor_library_changes">Minor changes to the library</h3>
<p>
@ -396,6 +436,19 @@ Do not send CLs removing the interior tags from such phrases.
</dd>
</dl><!-- bufio -->
<dl id="context"><dt><a href="/pkg/context/">context</a></dt>
<dd>
<p><!-- CL 223777 -->
Creating a derived <code>Context</code> using a nil parent is now explicitly
disallowed. Any attempt to do so with the
<a href="/pkg/context/#WithValue"><code>WithValue</code></a>,
<a href="/pkg/context/#WithDeadline"><code>WithDeadline</code></a>, or
<a href="/pkg/context/#WithCancel"><code>WithCancel</code></a> functions
will cause a panic.
</p>
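<p>
A sketch of code affected by this change:
</p>
<pre>
// This now panics with "cannot create context from nil parent":
//	ctx, cancel := context.WithCancel(nil)

// Pass an explicit root context instead:
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
</pre>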
</dd>
</dl><!-- context -->
<dl id="crypto"><dt><a href="/pkg/crypto/">crypto</a></dt>
<dd>
<p><!-- CL 231417, CL 225460 -->
@ -495,6 +548,17 @@ Do not send CLs removing the interior tags from such phrases.
fields <code>OCSPResponse</code> and <code>SignedCertificateTimestamps</code>
are now repopulated on client-side resumed connections.
</p>
<p><!-- CL 227840 -->
<a href="/pkg/crypto/tls/#Conn"><code>tls.Conn</code></a>
now returns an opaque error on permanently broken connections, wrapping
the temporary
<a href="/pkg/net/http/#Error"><code>net.Error</code></a>. To access the
original <code>net.Error</code>, use
<a href="/pkg/errors/#As"><code>errors.As</code></a> (or
<a href="/pkg/errors/#Unwrap"><code>errors.Unwrap</code></a>) instead of a
type assertion.
</p>
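<p>
A sketch of the suggested pattern (<code>tlsConn</code> and
<code>data</code> are illustrative):
</p>
<pre>
_, err := tlsConn.Write(data)
var netErr net.Error
if errors.As(err, &netErr) && netErr.Temporary() {
	// The wrapped net.Error is still reachable through errors.As,
	// even though the returned error itself is opaque.
}
</pre>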
</dd>
</dl><!-- crypto/tls -->
@ -511,15 +575,6 @@ Do not send CLs removing the interior tags from such phrases.
certificates with trailing dots.
</p>
<p><!-- CL 231379 -->
The deprecated, legacy behavior of treating the <code>CommonName</code>
field as a hostname when no Subject Alternative Names are present is now
disabled by default. It can be temporarily re-enabled by adding the value
<code>x509ignoreCN=0</code> to the <code>GODEBUG</code> environment
variable. If the <code>CommonName</code> is an invalid hostname, it's
always ignored.
</p>
<p><!-- CL 217298 -->
The new <a href="/pkg/crypto/x509/#CreateRevocationList"><code>CreateRevocationList</code></a>
function and <a href="/pkg/crypto/x509/#RevocationList"><code>RevocationList</code></a> type
@ -618,11 +673,6 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="encoding/json"><dt><a href="/pkg/encoding/json/">encoding/json</a></dt>
<dd>
<p><!-- CL 191783 -->
Decoding a JSON array into a slice no longer reuses any existing slice elements,
following the rules that the package documentation already stated.
</p>
<p><!-- CL 199837 -->
The package now has an internal limit to the maximum depth of
nesting when decoding. This reduces the possibility that a
@ -635,8 +685,8 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="flag"><dt><a href="/pkg/flag/">flag</a></dt>
<dd>
<p><!-- CL 221427 -->
When the flag package sees <code>-h</code> or <code>-help</code>, and
those flags are not defined, it now prints a usage message.
When the <code>flag</code> package sees <code>-h</code> or <code>-help</code>,
and those flags are not defined, it now prints a usage message.
If the <a href="/pkg/flag/#FlagSet"><code>FlagSet</code></a> was created with
<a href="/pkg/flag/#ExitOnError"><code>ExitOnError</code></a>,
<a href="/pkg/flag/#FlagSet.Parse"><code>FlagSet.Parse</code></a> would then
@ -656,15 +706,18 @@ Do not send CLs removing the interior tags from such phrases.
</dd>
</dl><!-- fmt -->
<dl id="go/printer"><dt><a href="/pkg/go/printer/">go/printer</a></dt>
<dl id="go/format"><dt><a href="/pkg/go/format/">go/format</a></dt>
<dd>
<p><!-- CL 231461 -->
The new <a href="/pkg/go/printer/#Mode"><code>Mode</code></a>
value <a href="/pkg/go/printer/#StdFormat"><code>StdFormat</code></a>
directs the printer to apply standard formatting changes while
printing the output.
<p><!-- golang.org/issue/37476, CL 231461, CL 240683 -->
The <a href="/pkg/go/format/#Source"><code>Source</code></a> and
<a href="/pkg/go/format/#Node"><code>Node</code></a> functions
now canonicalize number literal prefixes and exponents as part
of formatting Go source code. This matches the behavior of the
<a href="/pkg/cmd/gofmt/"><code>gofmt</code></a> command as it
was implemented <a href="/doc/go1.13#gofmt">since Go 1.13</a>.
</p>
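<p>
For example (a sketch using <code>go/format</code> directly):
</p>
<pre>
src := []byte("package p\n\nconst x = 0XABC\nconst y = 1E6\n")
out, err := format.Source(src)
if err != nil {
	log.Fatal(err)
}
// The output now reads "0xABC" and "1e6": literal prefixes and
// exponents are canonicalized to lower case, as the gofmt command
// has done since Go 1.13.
fmt.Println(string(out))
</pre>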
</dd>
</dl><!-- go/printer -->
</dl><!-- go/format -->
<dl id="html/template"><dt><a href="/pkg/html/template/">html/template</a></dt>
<dd>
@ -698,6 +751,16 @@ Do not send CLs removing the interior tags from such phrases.
</dd>
</dl><!-- math/big -->
<dl id="math/cmplx"><dt><a href="/pkg/math/cmplx/">math/cmplx</a></dt>
<dd>
<p><!-- CL 220689 -->
The functions in this package were updated to conform to the C99 standard
(Annex G IEC 60559-compatible complex arithmetic) with respect to handling
of special arguments such as infinity, NaN and signed zero.
</p>
</dd>
</dl><!-- math/cmplx-->
<dl id="net"><dt><a href="/pkg/net/">net</a></dt>
<dd>
<p><!-- CL 228645 -->
@ -868,9 +931,9 @@ Do not send CLs removing the interior tags from such phrases.
<dl id="pkg-runtime-pprof"><dt><a href="/pkg/runtime/pprof/">runtime/pprof</a></dt>
<dd>
<p><!-- CL 189318 -->
The goroutine profile includes the profile labels associated with each goroutine
at the time of profiling. This feature is not yet implemented for the profile
reported with <code>debug=2</code>.
The goroutine profile now includes the profile labels associated with each
goroutine at the time of profiling. This feature is not yet implemented for
the profile reported with <code>debug=2</code>.
</p>
</dd>
</dl>
@ -901,6 +964,7 @@ Do not send CLs removing the interior tags from such phrases.
<a href="/pkg/sync/#Map.Delete"><code>Map.Delete</code></a>
is more efficient.
</p>
</dd>
</dl><!-- sync -->
<dl id="syscall"><dt><a href="/pkg/syscall/">syscall</a></dt>
@ -956,7 +1020,8 @@ Do not send CLs removing the interior tags from such phrases.
</p>
<p><!-- CL 229085 -->
TODO: <a href="https://golang.org/cl/229085">https://golang.org/cl/229085</a>: reformat test chatty output
<code>go</code> <code>test</code> <code>-v</code> now groups output by
test name, rather than printing the test name on each line.
</p>
</dd>
</dl><!-- testing -->

doc/go1.16.html (new file, 224 lines)

@ -0,0 +1,224 @@
<!--{
"Title": "Go 1.16 Release Notes",
"Path": "/doc/go1.16"
}-->
<!--
NOTE: In this document and others in this directory, the convention is to
set fixed-width phrases with non-fixed-width spaces, as in
<code>hello</code> <code>world</code>.
Do not send CLs removing the interior tags from such phrases.
-->
<style>
main ul li { margin: 0.5em 0; }
</style>
<h2 id="introduction">DRAFT RELEASE NOTES — Introduction to Go 1.16</h2>
<p>
<strong>
Go 1.16 is not yet released. These are work-in-progress
release notes. Go 1.16 is expected to be released in February 2021.
</strong>
</p>
<h2 id="language">Changes to the language</h2>
<p>
TODO
</p>
<h2 id="ports">Ports</h2>
<p>
TODO
</p>
<h2 id="tools">Tools</h2>
<p>
TODO
</p>
<h3 id="go-command">Go command</h3>
<h4 id="modules">Modules</h4>
<p><!-- golang.org/issue/40276 -->
<code>go</code> <code>install</code> now accepts arguments with
version suffixes (for example, <code>go</code> <code>install</code>
<code>example.com/cmd@v1.0.0</code>). This causes <code>go</code>
<code>install</code> to build and install packages in module-aware mode,
ignoring the <code>go.mod</code> file in the current directory or any parent
directory, if there is one. This is useful for installing executables without
affecting the dependencies of the main module.<br>
TODO: write and link to section in golang.org/ref/mod<br>
TODO: write and link to blog post
</p>
<p><!-- golang.org/issue/24031 -->
<code>retract</code> directives may now be used in a <code>go.mod</code> file
to indicate that certain published versions of the module should not be used
by other modules. A module author may retract a version after a severe problem
is discovered or if the version was published unintentionally.<br>
TODO: write and link to section in golang.org/ref/mod<br>
TODO: write and link to tutorial or blog post
</p>
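<p>
A sketch of the directive in a <code>go.mod</code> file (the versions
and rationale comments are illustrative):
</p>
<pre>
retract (
	v1.0.1 // Published accidentally.
	[v1.1.0, v1.1.2] // Contains a severe bug.
)
</pre>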
<p><!-- golang.org/issue/26603 -->
The <code>go</code> <code>mod</code> <code>vendor</code>
and <code>go</code> <code>mod</code> <code>tidy</code> subcommands now accept
the <code>-e</code> flag, which instructs them to proceed despite errors in
resolving missing packages.
</p>
<h4 id="go-test"><code>go</code> <code>test</code></h4>
<p><!-- golang.org/issue/29062 -->
When using <code>go</code> <code>test</code>, a test that
calls <code>os.Exit(0)</code> during execution of a test function
will now be considered to fail.
This will help catch cases in which a test calls code that calls
<code>os.Exit(0)</code> and thereby stops running all future tests.
If a <code>TestMain</code> function calls <code>os.Exit(0)</code>,
that is still considered to be a passing test.
</p>
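<p>
A sketch of the kind of test this change catches
(<code>TestEarlyExit</code> is an illustrative name):
</p>
<pre>
func TestEarlyExit(t *testing.T) {
	// Before this change, os.Exit(0) here silently stopped the test
	// binary with a passing status; go test now reports a failure.
	os.Exit(0)
}
</pre>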
<h4 id="all-pattern">The <code>all</code> pattern</h4>
<p><!-- golang.org/cl/240623 -->
When the main module's <code>go.mod</code> file
declares <code>go</code> <code>1.16</code> or higher, the <code>all</code>
package pattern now matches only those packages that are transitively imported
by a package or test found in the main module. (Packages imported by <em>tests
of</em> packages imported by the main module are no longer included.) This is
the same set of packages retained
by <code>go</code> <code>mod</code> <code>vendor</code> since Go 1.11.
</p>
<h3 id="cgo">Cgo</h3>
<p> <!-- CL 252378 -->
The <a href="/cmd/cgo">cgo</a> tool will no longer try to translate
C struct bitfields into Go struct fields, even if their size can be
represented in Go. The order in which C bitfields appear in memory
is implementation dependent, so in some cases the cgo tool produced
results that were silently incorrect.
</p>
<p>
TODO
</p>
<h2 id="runtime">Runtime</h2>
<p>
TODO
</p>
<h2 id="compiler">Compiler</h2>
<p>
TODO
</p>
<h2 id="linker">Linker</h2>
<p>
This release includes additional improvements to the Go linker,
reducing linker resource usage (both time and memory) and improving
code robustness/maintainability. These changes form the second half
of a two-release project to
<a href="https://golang.org/s/better-linker">modernize the Go
linker</a>.
</p>
<p>
The linker changes in 1.16 extend the 1.15 improvements to all
supported architecture/OS combinations (the 1.15 performance improvements
were primarily focused on <code>ELF</code>-based OSes and
<code>amd64</code> architectures). For a representative set of
large Go programs, linking is 20-35% faster than 1.15 and requires
5-15% less memory on average for <code>linux/amd64</code>, with larger
improvements for other architectures and OSes.
</p>
<p>
TODO: update with final numbers later in the release.
</p>
<h2 id="library">Core library</h2>
<p>
TODO
</p>
<h3 id="net"><a href="/pkg/net/">net</a></h3>
<p><!-- CL 250357 -->
The case of I/O on a closed network connection, or I/O on a network
connection that is closed before any of the I/O completes, can now
be detected using the new <a href="/pkg/net/#ErrClosed">ErrClosed</a> error.
A typical use would be <code>errors.Is(err, net.ErrClosed)</code>.
In earlier releases the only way to reliably detect this case was to
match the string returned by the <code>Error</code> method
with <code>"use of closed network connection"</code>.
</p>
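<p>
A sketch of the new check (<code>conn</code> and <code>buf</code> are
illustrative):
</p>
<pre>
_, err := conn.Read(buf)
if errors.Is(err, net.ErrClosed) {
	// The connection is closed; stop the read loop instead of
	// matching on the error string.
	return
}
</pre>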
<h3 id="text/template/parse"><a href="/pkg/text/template/parse/">text/template/parse</a></h3>
<p><!-- CL 229398, golang.org/issue/34652 -->
A new <a href="/pkg/text/template/parse/#CommentNode"><code>CommentNode</code></a>
was added to the parse tree. The <a href="/pkg/text/template/parse/#Mode"><code>Mode</code></a>
field in the <code>parse.Tree</code> enables access to it.
</p>
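<p>
A sketch of enabling comment parsing with the parser directly:
</p>
<pre>
t := parse.New("demo")
t.Mode = parse.ParseComments
treeSet := make(map[string]*parse.Tree)
t, err := t.Parse("{{/* a comment */}}hello", "", "", treeSet)
// With ParseComments set, t.Root now contains a *parse.CommentNode
// for the comment; without the mode, comments are discarded.
</pre>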
<!-- text/template/parse -->
<h3 id="unicode"><a href="/pkg/unicode/">unicode</a></h3>
<p><!-- CL 248765 -->
The <a href="/pkg/unicode/"><code>unicode</code></a> package and associated
support throughout the system has been upgraded from Unicode 12.0.0 to
<a href="https://www.unicode.org/versions/Unicode13.0.0/">Unicode 13.0.0</a>,
which adds 5,930 new characters, including four new scripts, and 55 new emoji.
Unicode 13.0.0 also designates plane 3 (U+30000-U+3FFFF) as the tertiary
ideographic plane.
</p>
<h3 id="minor_library_changes">Minor changes to the library</h3>
<p>
As always, there are various minor changes and updates to the library,
made with the Go 1 <a href="/doc/go1compat">promise of compatibility</a>
in mind.
</p>
<p>
TODO
</p>
<dl id="net/http"><dt><a href="/pkg/net/http/">net/http</a></dt>
<dd>
<p><!-- CL 233637 -->
In the <a href="/pkg/net/http/"><code>net/http</code></a> package, the
behavior of <a href="/pkg/net/http/#StripPrefix"><code>StripPrefix</code></a>
has been changed to strip the prefix from the request URL's
<code>RawPath</code> field in addition to its <code>Path</code> field.
In past releases, only the <code>Path</code> field was trimmed, and so if the
request URL contained any escaped characters the URL would be modified to
have mismatched <code>Path</code> and <code>RawPath</code> fields.
In Go 1.16, <code>StripPrefix</code> trims both fields.
If there are escaped characters in the prefix part of the request URL the
handler serves a 404 instead of its previous behavior of invoking the
underlying handler with a mismatched <code>Path</code>/<code>RawPath</code> pair.
</p>
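<p>
Typical use is unchanged (a sketch; the directory and prefix are
illustrative):
</p>
<pre>
fs := http.FileServer(http.Dir("./public"))
http.Handle("/static/", http.StripPrefix("/static/", fs))
// In Go 1.16 both Path and RawPath are trimmed, so requests with
// escaped characters under /static/ no longer reach the handler
// with a mismatched Path/RawPath pair.
</pre>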
<p><!-- CL 252497 -->
The <a href="/pkg/net/http/"><code>net/http</code></a> package now rejects HTTP range requests
of the form <code>"Range": "bytes=--N"</code> where <code>"-N"</code> is a negative suffix length, for
example <code>"Range": "bytes=--2"</code>. It now replies with a <code>416 "Range Not Satisfiable"</code> response.
</p>
</dd>
</dl><!-- net/http -->


@ -507,8 +507,8 @@ These default to the values of <code>$GOHOSTOS</code> and
<p>
Choices for <code>$GOOS</code> are
<code>android</code>, <code>darwin</code> (macOS/iOS),
<code>dragonfly</code>, <code>freebsd</code>, <code>illumos</code>, <code>js</code>,
<code>android</code>, <code>darwin</code>, <code>dragonfly</code>,
<code>freebsd</code>, <code>illumos</code>, <code>ios</code>, <code>js</code>,
<code>linux</code>, <code>netbsd</code>, <code>openbsd</code>,
<code>plan9</code>, <code>solaris</code> and <code>windows</code>.
</p>
@ -567,6 +567,9 @@ The valid combinations of <code>$GOOS</code> and <code>$GOARCH</code> are:
<td></td><td><code>illumos</code></td> <td><code>amd64</code></td>
</tr>
<tr>
<td></td><td><code>ios</code></td> <td><code>arm64</code></td>
</tr>
<tr>
<td></td><td><code>js</code></td> <td><code>wasm</code></td>
</tr>
<tr>
@ -600,6 +603,9 @@ The valid combinations of <code>$GOOS</code> and <code>$GOARCH</code> are:
<td></td><td><code>linux</code></td> <td><code>mips64le</code></td>
</tr>
<tr>
<td></td><td><code>linux</code></td> <td><code>riscv64</code></td>
</tr>
<tr>
<td></td><td><code>linux</code></td> <td><code>s390x</code></td>
</tr>
<tr>


@ -105,7 +105,7 @@ func test(tmpdir, file, want string) error {
// Canonicalize output.
out = bytes.TrimRight(out, "\n")
out = bytes.Replace(out, []byte{'\n'}, []byte{' '}, -1)
out = bytes.ReplaceAll(out, []byte{'\n'}, []byte{' '})
// Check the result.
match, err := regexp.Match(want, out)


@ -24,7 +24,7 @@ func test18146(t *testing.T) {
t.Skip("skipping in short mode")
}
if runtime.GOOS == "darwin" {
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
t.Skipf("skipping flaky test on %s; see golang.org/issue/18202", runtime.GOOS)
}


@ -30,7 +30,7 @@ func TestCrossPackageTests(t *testing.T) {
switch runtime.GOOS {
case "android":
t.Skip("Can't exec cmd/go subprocess on Android.")
case "darwin":
case "darwin", "ios":
switch runtime.GOARCH {
case "arm64":
t.Skip("Can't exec cmd/go subprocess on iOS.")


@ -62,7 +62,7 @@ import (
func testSigaltstack(t *testing.T) {
switch {
case runtime.GOOS == "solaris", runtime.GOOS == "illumos", runtime.GOOS == "darwin" && runtime.GOARCH == "arm64":
case runtime.GOOS == "solaris", runtime.GOOS == "illumos", (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64":
t.Skipf("switching signal stack not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}


@ -319,6 +319,7 @@ typedef enum {
// issue 4339
// We've historically permitted #include <>, so test it here. Issue 29333.
// Also see issue 41059.
#include <issue4339.h>
// issue 4417
@ -901,6 +902,12 @@ typedef struct S32579 { unsigned char data[1]; } S32579;
// issue 38649
// Test that #define'd type aliases work.
#define netbsd_gid unsigned int
// issue 40494
// Inconsistent handling of tagged enum and union types.
enum Enum40494 { X_40494 };
union Union40494 { int x; };
void issue40494(enum Enum40494 e, union Union40494* up) {}
*/
import "C"
@ -1769,7 +1776,7 @@ func test14838(t *testing.T) {
var sink C.int
func test17065(t *testing.T) {
if runtime.GOOS == "darwin" {
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
t.Skip("broken on darwin; issue 17065")
}
for i := range C.ii {
@ -2204,3 +2211,10 @@ var issue38649 C.netbsd_gid = 42
// issue 39877
var issue39877 *C.void = nil
// issue 40494
// No runtime test; just make sure it compiles.
func Issue40494() {
C.issue40494(C.enum_Enum40494(C.X_40494), (*C.union_Union40494)(nil))
}


@ -164,7 +164,7 @@ func Add(x int) {
}
func testCthread(t *testing.T) {
if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" {
if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" {
t.Skip("the iOS exec wrapper is unable to properly handle the panic from Add")
}
sum.i = 0


@ -118,9 +118,9 @@ func testMain(m *testing.M) int {
cc = append(cc, s[start:])
}
if GOOS == "darwin" {
if GOOS == "darwin" || GOOS == "ios" {
// For Darwin/ARM.
// TODO(crawshaw): can we do better?
// TODO: do we still need this?
cc = append(cc, []string{"-framework", "CoreFoundation", "-framework", "Foundation"}...)
}
if GOOS == "aix" {
@ -133,7 +133,7 @@ func testMain(m *testing.M) int {
libbase = "gccgo_" + libgodir + "_fPIC"
} else {
switch GOOS {
case "darwin":
case "darwin", "ios":
if GOARCH == "arm64" {
libbase += "_shared"
}
@ -303,7 +303,7 @@ func TestInstall(t *testing.T) {
func TestEarlySignalHandler(t *testing.T) {
switch GOOS {
case "darwin":
case "darwin", "ios":
switch GOARCH {
case "arm64":
t.Skipf("skipping on %s/%s; see https://golang.org/issue/13701", GOOS, GOARCH)
@ -384,7 +384,7 @@ func TestSignalForwarding(t *testing.T) {
expectSignal(t, err, syscall.SIGSEGV)
// SIGPIPE is never forwarded on darwin. See golang.org/issue/33384.
if runtime.GOOS != "darwin" {
if runtime.GOOS != "darwin" && runtime.GOOS != "ios" {
// Test SIGPIPE forwarding
cmd = exec.Command(bin[0], append(bin[1:], "3")...)
@ -485,7 +485,7 @@ func TestSignalForwardingExternal(t *testing.T) {
// doesn't work on this platform.
func checkSignalForwardingTest(t *testing.T) {
switch GOOS {
case "darwin":
case "darwin", "ios":
switch GOARCH {
case "arm64":
t.Skipf("skipping on %s/%s; see https://golang.org/issue/13701", GOOS, GOARCH)
@ -603,7 +603,7 @@ func TestExtar(t *testing.T) {
if runtime.Compiler == "gccgo" {
t.Skip("skipping -extar test when using gccgo")
}
if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" {
if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" {
t.Skip("shell scripts are not executable on iOS hosts")
}
@ -645,7 +645,7 @@ func TestExtar(t *testing.T) {
func TestPIE(t *testing.T) {
switch GOOS {
case "windows", "darwin", "plan9":
case "windows", "darwin", "ios", "plan9":
t.Skipf("skipping PIE test on %s", GOOS)
}
@ -738,7 +738,7 @@ func TestSIGPROF(t *testing.T) {
switch GOOS {
case "windows", "plan9":
t.Skipf("skipping SIGPROF test on %s", GOOS)
case "darwin":
case "darwin", "ios":
t.Skipf("skipping SIGPROF test on %s; see https://golang.org/issue/19320", GOOS)
}
@ -841,7 +841,7 @@ func TestCompileWithoutShared(t *testing.T) {
expectSignal(t, err, syscall.SIGSEGV)
// SIGPIPE is never forwarded on darwin. See golang.org/issue/33384.
if runtime.GOOS != "darwin" {
if runtime.GOOS != "darwin" && runtime.GOOS != "ios" {
binArgs := append(cmdToRun(exe), "3")
t.Log(binArgs)
out, err = exec.Command(binArgs[0], binArgs[1:]...).CombinedOutput()


@ -98,7 +98,7 @@ func testMain(m *testing.M) int {
}
switch GOOS {
case "darwin":
case "darwin", "ios":
// For Darwin/ARM.
// TODO(crawshaw): can we do better?
cc = append(cc, []string{"-framework", "CoreFoundation", "-framework", "Foundation"}...)
@ -107,7 +107,7 @@ func testMain(m *testing.M) int {
}
libgodir := GOOS + "_" + GOARCH
switch GOOS {
case "darwin":
case "darwin", "ios":
if GOARCH == "arm64" {
libgodir += "_shared"
}
@ -407,7 +407,7 @@ func TestUnexportedSymbols(t *testing.T) {
adbPush(t, libname)
linkFlags := "-Wl,--no-as-needed"
if GOOS == "darwin" {
if GOOS == "darwin" || GOOS == "ios" {
linkFlags = ""
}
@ -636,7 +636,7 @@ func copyFile(t *testing.T, dst, src string) {
func TestGo2C2Go(t *testing.T) {
switch GOOS {
case "darwin":
case "darwin", "ios":
// Darwin shared libraries don't support the multiple
// copies of the runtime package implied by this test.
t.Skip("linking c-shared into Go programs not supported on Darwin; issue 29061")


@ -0,0 +1,31 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// +build ignore
package main
// This file tests that we don't generate an incorrect field location
// for a bitfield that appears aligned.
/*
struct bitfields {
unsigned int B1 : 5;
unsigned int B2 : 1;
unsigned int B3 : 1;
unsigned int B4 : 1;
unsigned int Short1 : 16; // misaligned on 8 bit boundary
unsigned int B5 : 1;
unsigned int B6 : 1;
unsigned int B7 : 1;
unsigned int B8 : 1;
unsigned int B9 : 1;
unsigned int B10 : 3;
unsigned int Short2 : 16; // alignment is OK
unsigned int Short3 : 16; // alignment is OK
};
*/
import "C"
type bitfields C.struct_bitfields


@ -4,6 +4,12 @@
package main
import (
"fmt"
"os"
"reflect"
)
// Test that the struct field in anonunion.go was promoted.
var v1 T
var v2 = v1.L
@ -23,4 +29,26 @@ var v7 = S{}
var _ = issue38649{X: 0}
func main() {
pass := true
// The Go translation of bitfields should not have any of the
// bitfield types. The order in which bitfields are laid out
// in memory is implementation defined, so we can't easily
// know how a bitfield should correspond to a Go type, even if
// it appears to be aligned correctly.
bitfieldType := reflect.TypeOf(bitfields{})
check := func(name string) {
_, ok := bitfieldType.FieldByName(name)
if ok {
fmt.Fprintf(os.Stderr, "found unexpected bitfields field %s\n", name)
pass = false
}
}
check("Short1")
check("Short2")
check("Short3")
if !pass {
os.Exit(1)
}
}


@ -19,6 +19,7 @@ import (
// import "C" block. Add more tests here.
var filePrefixes = []string{
"anonunion",
"bitfields",
"issue8478",
"fieldtypedef",
"issue37479",


@ -462,6 +462,7 @@ func TestTrivialExecutable(t *testing.T) {
run(t, "trivial executable", "../../bin/trivial")
AssertIsLinkedTo(t, "../../bin/trivial", soname)
AssertHasRPath(t, "../../bin/trivial", gorootInstallDir)
checkSize(t, "../../bin/trivial", 100000) // it is 19K on linux/amd64, 100K should be enough
}
// Build a trivial program in PIE mode that links against the shared runtime and check it runs.
@ -470,6 +471,18 @@ func TestTrivialExecutablePIE(t *testing.T) {
run(t, "trivial executable", "./trivial.pie")
AssertIsLinkedTo(t, "./trivial.pie", soname)
AssertHasRPath(t, "./trivial.pie", gorootInstallDir)
checkSize(t, "./trivial.pie", 100000) // it is 19K on linux/amd64, 100K should be enough
}
// Check that the file size does not exceed a limit.
func checkSize(t *testing.T, f string, limit int64) {
fi, err := os.Stat(f)
if err != nil {
t.Fatalf("stat failed: %v", err)
}
if sz := fi.Size(); sz > limit {
t.Errorf("file too large: got %d, want <= %d", sz, limit)
}
}
// Build a division test program and check it runs.


@ -21,7 +21,7 @@ func requireTestSOSupported(t *testing.T) {
t.Helper()
switch runtime.GOARCH {
case "arm64":
if runtime.GOOS == "darwin" {
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
t.Skip("No exec facility on iOS.")
}
case "ppc64":
@ -74,7 +74,7 @@ func TestSO(t *testing.T) {
ext := "so"
args := append(gogccflags, "-shared")
switch runtime.GOOS {
case "darwin":
case "darwin", "ios":
ext = "dylib"
args = append(args, "-undefined", "suppress", "-flat_namespace")
case "windows":
@ -119,7 +119,7 @@ func TestSO(t *testing.T) {
cmd.Env = append(os.Environ(), "GOPATH="+GOPATH)
if runtime.GOOS != "windows" {
s := "LD_LIBRARY_PATH"
if runtime.GOOS == "darwin" {
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
s = "DYLD_LIBRARY_PATH"
}
cmd.Env = append(os.Environ(), s+"=.")


@ -21,7 +21,7 @@ func requireTestSOSupported(t *testing.T) {
t.Helper()
switch runtime.GOARCH {
case "arm64":
if runtime.GOOS == "darwin" {
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
t.Skip("No exec facility on iOS.")
}
case "ppc64":
@ -74,7 +74,7 @@ func TestSO(t *testing.T) {
ext := "so"
args := append(gogccflags, "-shared")
switch runtime.GOOS {
case "darwin":
case "darwin", "ios":
ext = "dylib"
args = append(args, "-undefined", "suppress", "-flat_namespace")
case "windows":
@ -119,7 +119,7 @@ func TestSO(t *testing.T) {
cmd.Env = append(os.Environ(), "GOPATH="+GOPATH)
if runtime.GOOS != "windows" {
s := "LD_LIBRARY_PATH"
if runtime.GOOS == "darwin" {
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
s = "DYLD_LIBRARY_PATH"
}
cmd.Env = append(os.Environ(), s+"=.")


@ -15,4 +15,4 @@ else
exit 1
fi
exec $CLANG -arch $CLANGARCH -isysroot $SDK_PATH -mios-version-min=10.0 "$@"
exec "$CLANG" -arch $CLANGARCH -isysroot "$SDK_PATH" -mios-version-min=10.0 "$@"


@ -11,6 +11,7 @@
// - Node.js
// - Electron
// - Parcel
// - Webpack
if (typeof global !== "undefined") {
// global already exists
@ -28,7 +29,7 @@
if (!global.fs && global.require) {
const fs = require("fs");
if (Object.keys(fs) !== 0) {
if (typeof fs === "object" && fs !== null && Object.keys(fs).length !== 0) {
global.fs = fs;
}
}
@ -556,6 +557,7 @@
}
if (
typeof module !== "undefined" &&
global.require &&
global.require.main === module &&
global.process &&


@ -66,7 +66,7 @@ func statUnix(fi os.FileInfo, h *Header) error {
minor := uint32((dev & 0x00000000000000ff) >> 0)
minor |= uint32((dev & 0x00000ffffff00000) >> 12)
h.Devmajor, h.Devminor = int64(major), int64(minor)
case "darwin":
case "darwin", "ios":
// Copied from golang.org/x/sys/unix/dev_darwin.go.
major := uint32((dev >> 24) & 0xff)
minor := uint32(dev & 0xffffff)


@ -425,7 +425,7 @@ func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
// of bytes in the combined first two elements, error).
// The complete result is equal to
// `bytes.Join(append(fullBuffers, finalFragment), nil)`, which has a
// length of `totalLen`. The result is strucured in this way to allow callers
// length of `totalLen`. The result is structured in this way to allow callers
// to minimize allocations and copies.
func (b *Reader) collectFragments(delim byte) (fullBuffers [][]byte, finalFragment []byte, totalLen int, err error) {
var frag []byte


@ -73,29 +73,37 @@ func testAddr2Line(t *testing.T, exepath, addr string) {
if err != nil {
t.Fatalf("Stat failed: %v", err)
}
// Debug paths are stored slash-separated, so convert to system-native.
srcPath = filepath.FromSlash(srcPath)
fi2, err := os.Stat(srcPath)
if gorootFinal := os.Getenv("GOROOT_FINAL"); gorootFinal != "" && strings.HasPrefix(srcPath, gorootFinal) {
if os.IsNotExist(err) || (err == nil && !os.SameFile(fi1, fi2)) {
// srcPath has had GOROOT_FINAL substituted for GOROOT, and it doesn't
// match the actual file. GOROOT probably hasn't been moved to its final
// location yet, so try the original location instead.
// If GOROOT_FINAL is set and srcPath is not the file we expect, perhaps
// srcPath has had GOROOT_FINAL substituted for GOROOT and GOROOT hasn't been
// moved to its final location yet. If so, try the original location instead.
if gorootFinal := os.Getenv("GOROOT_FINAL"); gorootFinal != "" &&
(os.IsNotExist(err) || (err == nil && !os.SameFile(fi1, fi2))) {
// srcPath is clean, but GOROOT_FINAL itself might not be.
// (See https://golang.org/issue/41447.)
gorootFinal = filepath.Clean(gorootFinal)
if strings.HasPrefix(srcPath, gorootFinal) {
fi2, err = os.Stat(runtime.GOROOT() + strings.TrimPrefix(srcPath, gorootFinal))
}
}
if err != nil {
t.Fatalf("Stat failed: %v", err)
}
if !os.SameFile(fi1, fi2) {
t.Fatalf("addr2line_test.go and %s are not same file", srcPath)
}
if srcLineNo != "99" {
t.Fatalf("line number = %v; want 99", srcLineNo)
if srcLineNo != "107" {
t.Fatalf("line number = %v; want 107", srcLineNo)
}
}
// This is line 98. The test depends on that.
// This is line 106. The test depends on that.
func TestAddr2Line(t *testing.T) {
testenv.MustHaveGoBuild(t)


@ -87,7 +87,10 @@ var contexts = []*build.Context{
func contextName(c *build.Context) string {
s := c.GOOS + "-" + c.GOARCH
if c.CgoEnabled {
return s + "-cgo"
s += "-cgo"
}
if c.Dir != "" {
s += fmt.Sprintf(" [%s]", c.Dir)
}
return s
}
@ -478,6 +481,9 @@ func (w *Walker) loadImports() {
cmd := exec.Command(goCmd(), "list", "-e", "-deps", "-json", "std")
cmd.Env = listEnv(w.context)
if w.context.Dir != "" {
cmd.Dir = w.context.Dir
}
out, err := cmd.CombinedOutput()
if err != nil {
log.Fatalf("loading imports: %v\n%s", err, out)
@ -491,6 +497,7 @@ func (w *Walker) loadImports() {
var pkg struct {
ImportPath, Dir string
ImportMap map[string]string
Standard bool
}
err := dec.Decode(&pkg)
if err == io.EOF {
@ -503,11 +510,13 @@ func (w *Walker) loadImports() {
// - Package "unsafe" contains special signatures requiring
// extra care when printing them - ignore since it is not
// going to change w/o a language change.
// - internal and vendored packages do not contribute to our
// API surface.
// - Internal and vendored packages do not contribute to our
// API surface. (If we are running within the "std" module,
// vendored dependencies appear as themselves instead of
// their "vendor/" standard-library copies.)
// - 'go list std' does not include commands, which cannot be
// imported anyway.
if ip := pkg.ImportPath; ip != "unsafe" && !strings.HasPrefix(ip, "vendor/") && !internalPkg.MatchString(ip) {
if ip := pkg.ImportPath; pkg.Standard && ip != "unsafe" && !strings.HasPrefix(ip, "vendor/") && !internalPkg.MatchString(ip) {
stdPackages = append(stdPackages, ip)
}
importDir[pkg.ImportPath] = pkg.Dir


@ -216,3 +216,16 @@ func TestIssue29837(t *testing.T) {
}
}
}
func TestIssue41358(t *testing.T) {
context := new(build.Context)
*context = build.Default
context.Dir = filepath.Join(context.GOROOT, "src")
w := NewWalker(context, context.Dir)
for _, pkg := range w.stdPackages {
if strings.HasPrefix(pkg, "vendor/") || strings.HasPrefix(pkg, "golang.org/x/") {
t.Fatalf("stdPackages contains unexpected package %s", pkg)
}
}
}


@ -77,6 +77,10 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
SHA1H V5, V4 // a408285e
SHA1M V8.S4, V7, V6 // e620085e
SHA1P V11.S4, V10, V9 // 49110b5e
SHA512H V2.D2, V1, V0 // 208062ce
SHA512H2 V4.D2, V3, V2 // 628464ce
SHA512SU0 V9.D2, V8.D2 // 2881c0ce
SHA512SU1 V7.D2, V6.D2, V5.D2 // c58867ce
VADDV V0.S4, V0 // 00b8b14e
VMOVI $82, V0.B16 // 40e6024f
VUADDLV V6.B16, V6 // c638306e
@ -141,6 +145,37 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VZIP2 V10.D2, V13.D2, V3.D2 // a379ca4e
VZIP1 V17.S2, V4.S2, V26.S2 // 9a38910e
VZIP2 V25.S2, V14.S2, V25.S2 // d979990e
VUXTL V30.B8, V30.H8 // dea7082f
VUXTL V30.H4, V29.S4 // dda7102f
VUXTL V29.S2, V2.D2 // a2a7202f
VUXTL2 V30.H8, V30.S4 // dea7106f
VUXTL2 V29.S4, V2.D2 // a2a7206f
VUXTL2 V30.B16, V2.H8 // c2a7086f
VBIT V21.B16, V25.B16, V4.B16 // 241fb56e
VBSL V23.B16, V3.B16, V7.B16 // 671c776e
VCMTST V2.B8, V29.B8, V2.B8 // a28f220e
VCMTST V2.D2, V23.D2, V3.D2 // e38ee24e
VSUB V2.B8, V30.B8, V30.B8 // de87222e
VUZP1 V0.B8, V30.B8, V1.B8 // c11b000e
VUZP1 V1.B16, V29.B16, V2.B16 // a21b014e
VUZP1 V2.H4, V28.H4, V3.H4 // 831b420e
VUZP1 V3.H8, V27.H8, V4.H8 // 641b434e
VUZP1 V28.S2, V2.S2, V5.S2 // 45189c0e
VUZP1 V29.S4, V1.S4, V6.S4 // 26189d4e
VUZP1 V30.D2, V0.D2, V7.D2 // 0718de4e
VUZP2 V0.D2, V30.D2, V1.D2 // c15bc04e
VUZP2 V30.D2, V0.D2, V29.D2 // 1d58de4e
VUSHLL $0, V30.B8, V30.H8 // dea7082f
VUSHLL $0, V30.H4, V29.S4 // dda7102f
VUSHLL $0, V29.S2, V2.D2 // a2a7202f
VUSHLL2 $0, V30.B16, V2.H8 // c2a7086f
VUSHLL2 $0, V30.H8, V30.S4 // dea7106f
VUSHLL2 $0, V29.S4, V2.D2 // a2a7206f
VUSHLL $7, V30.B8, V30.H8 // dea70f2f
VUSHLL $15, V30.H4, V29.S4 // dda71f2f
VUSHLL2 $31, V30.S4, V2.D2 // c2a73f6f
VBIF V0.B8, V30.B8, V1.B8 // c11fe02e
VBIF V30.B16, V0.B16, V2.B16 // 021cfe6e
MOVD (R2)(R6.SXTW), R4 // 44c866f8
MOVD (R3)(R6), R5 // MOVD (R3)(R6*1), R5 // 656866f8
MOVD (R2)(R6), R4 // MOVD (R2)(R6*1), R4 // 446866f8
@ -182,6 +217,10 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
FMOVS $(0.96875), F3 // 03f02d1e
FMOVD $(28.0), F4 // 0490671e
// move a large constant to a Vd.
FMOVD $0x8040201008040201, V20 // FMOVD $-9205322385119247871, V20
FMOVQ $0x8040201008040202, V29 // FMOVQ $-9205322385119247870, V29
FMOVS (R2)(R6), F4 // FMOVS (R2)(R6*1), F4 // 446866bc
FMOVS (R2)(R6<<2), F4 // 447866bc
FMOVD (R2)(R6), F4 // FMOVD (R2)(R6*1), F4 // 446866fc
@ -304,6 +343,17 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
MOVD $0x210000, R0 // MOVD $2162688, R0 // 2004a0d2
MOVD $0xffffffffffffaaaa, R1 // MOVD $-21846, R1 // a1aa8a92
MOVD $0x1002(RSP), R1 // MOVD $4098(RSP), R1 // fb074091610b0091
MOVD $0x1708(RSP), RSP // MOVD $5896(RSP), RSP // fb0740917f231c91
MOVD $0x2001(R7), R1 // MOVD $8193(R7), R1 // fb08409161070091
MOVD $0xffffff(R7), R1 // MOVD $16777215(R7), R1 // fbfc7f9161ff3f91
MOVD $-0x1(R7), R1 // MOVD $-1(R7), R1 // e10400d1
MOVD $-0x30(R7), R1 // MOVD $-48(R7), R1 // e1c000d1
MOVD $-0x708(R7), R1 // MOVD $-1800(R7), R1 // e1201cd1
MOVD $-0x2000(RSP), R1 // MOVD $-8192(RSP), R1 // e10b40d1
MOVD $-0x10000(RSP), RSP // MOVD $-65536(RSP), RSP // ff4340d1
//
// CLS
//
@ -355,17 +405,21 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8
VLD4 (R15), [V10.H4, V11.H4, V12.H4, V13.H4] // ea05400c
VLD4.P 32(R24), [V31.B8, V0.B8, V1.B8, V2.B8] // 1f03df0c
VLD4.P (R13)(R9), [V14.S2, V15.S2, V16.S2, V17.S2] // VLD4.P (R13)(R9*1), [V14.S2,V15.S2,V16.S2,V17.S2] // ae09c90c
VLD1R (R1), [V9.B8] // 29c0400d
VLD1R.P (R1), [V9.B8] // 29c0df0d
VLD1R.P 1(R1), [V2.B8] // 22c0df0d
VLD1R.P 2(R1), [V2.H4] // 22c4df0d
VLD1R (R0), [V0.B16] // 00c0404d
VLD1R.P 16(R0), [V0.B16] // 00c0df4d
VLD1R.P (R0), [V0.B16] // 00c0df4d
VLD1R.P (R15)(R1), [V15.H4] // VLD1R.P (R15)(R1*1), [V15.H4] // efc5c10d
VLD2R (R15), [V15.H4, V16.H4] // efc5600d
VLD2R.P 32(R0), [V0.D2, V1.D2] // 00ccff4d
VLD2R.P 16(R0), [V0.D2, V1.D2] // 00ccff4d
VLD2R.P (R0)(R5), [V31.D1, V0.D1] // VLD2R.P (R0)(R5*1), [V31.D1, V0.D1] // 1fcce50d
VLD3R (RSP), [V31.S2, V0.S2, V1.S2] // ffeb400d
VLD3R.P 24(R15), [V15.H4, V16.H4, V17.H4] // efe5df0d
VLD3R.P 6(R15), [V15.H4, V16.H4, V17.H4] // efe5df0d
VLD3R.P (R15)(R6), [V15.H8, V16.H8, V17.H8] // VLD3R.P (R15)(R6*1), [V15.H8, V16.H8, V17.H8] // efe5c64d
VLD4R (R0), [V0.B8, V1.B8, V2.B8, V3.B8] // 00e0600d
VLD4R.P 64(RSP), [V31.S4, V0.S4, V1.S4, V2.S4] // ffebff4d
VLD4R.P 16(RSP), [V31.S4, V0.S4, V1.S4, V2.S4] // ffebff4d
VLD4R.P (R15)(R9), [V15.H4, V16.H4, V17.H4, V18.H4] // VLD4R.P (R15)(R9*1), [V15.H4, V16.H4, V17.H4, V18.H4] // efe5e90d
VST1.P [V24.S2], 8(R2) // 58789f0c
VST1 [V29.S2, V30.S2], (R29) // bdab000c


@ -591,7 +591,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
FMOVS R8, F15 // 0f01271e
FMOVD F2, F9 // 4940601e
FMOVS F4, F27 // 9b40201e
//TODO VFMOV $3.125, V8.2D // 28f5006f
//TODO VFMOV $3.125, V8.D2 // 28f5006f
FMSUBS F13, F21, F13, F19 // b3d50d1f
FMSUBD F11, F7, F15, F31 // ff9d4b1f
//TODO VFMUL V9.S[2], F21, F19 // b39a895f
@ -648,7 +648,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
FSUBS F25, F23, F0 // e03a391e
FSUBD F11, F13, F24 // b8396b1e
//TODO SCVTFSS F30, F20 // d4db215e
//TODO VSCVTF V7.2S, V17.2S // f1d8210e
//TODO VSCVTF V7.S2, V17.S2 // f1d8210e
SCVTFWS R3, F16 // 7000221e
SCVTFWD R20, F4 // 8402621e
SCVTFS R16, F12 // 0c02229e


@ -339,4 +339,18 @@ TEXT errors(SB),$0
MRS ICV_EOIR1_EL1, R3 // ERROR "system register is not readable"
MRS PMSWINC_EL0, R3 // ERROR "system register is not readable"
MRS OSLAR_EL1, R3 // ERROR "system register is not readable"
VLD3R.P 24(R15), [V15.H4,V16.H4,V17.H4] // ERROR "invalid post-increment offset"
VBIT V1.H4, V12.H4, V3.H4 // ERROR "invalid arrangement"
VBSL V1.D2, V12.D2, V3.D2 // ERROR "invalid arrangement"
VUXTL V30.D2, V30.H8 // ERROR "operand mismatch"
VUXTL2 V20.B8, V21.H8 // ERROR "operand mismatch"
VUXTL V3.D2, V4.B8 // ERROR "operand mismatch"
VUZP1 V0.B8, V30.B8, V1.B16 // ERROR "operand mismatch"
VUZP2 V0.Q1, V30.Q1, V1.Q1 // ERROR "invalid arrangement"
VUSHLL $0, V30.D2, V30.H8 // ERROR "operand mismatch"
VUSHLL2 $0, V20.B8, V21.H8 // ERROR "operand mismatch"
VUSHLL $8, V30.B8, V30.H8 // ERROR "shift amount out of range"
VUSHLL2 $32, V30.S4, V2.D2 // ERROR "shift amount out of range"
VBIF V0.B8, V1.B8, V2.B16 // ERROR "operand mismatch"
VBIF V0.D2, V1.D2, V2.D2 // ERROR "invalid arrangement"
RET


@ -1037,6 +1037,7 @@ label1:
// VSX load with length X-form (also left-justified)
LXVL R3,R4, VS0
LXVLL R3,R4, VS0
LXVX R3,R4, VS0
// VSX load, DQ-form
// <MNEMONIC> DQ(RA), XS produces
// <mnemonic> XS, DQ(RA)
@ -1060,6 +1061,7 @@ label1:
// VSX store with length, X-form (also left-justified)
STXVL VS0, R3,R4
STXVLL VS0, R3,R4
STXVX VS0, R3,R4
// VSX move from VSR, XX1-form
// <MNEMONIC> XS,RA produces


@ -284,6 +284,10 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
RLDICLCC $0, R4, $15, R6 // 788603c1
RLDICR $0, R4, $15, R6 // 788603c4
RLDICRCC $0, R4, $15, R6 // 788603c5
RLDIC $0, R4, $15, R6 // 788603c8
RLDICCC $0, R4, $15, R6 // 788603c9
CLRLSLWI $16, R5, $8, R4 // 54a4861e
CLRLSLDI $2, R4, $24, R3 // 78831588
BEQ 0(PC) // 41820000
BGE 0(PC) // 40800000
@ -595,11 +599,13 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0
LXV 16(R3), VS1 // f4230011
LXVL R3, R4, VS1 // 7c23221a
LXVLL R3, R4, VS1 // 7c23225a
LXVX R3, R4, VS1 // 7c232218
LXSDX (R3)(R4), VS1 // 7c241c98
STXVD2X VS1, (R3)(R4) // 7c241f98
STXV VS1,16(R3) // f4230015
STXVL VS1, R3, R4 // 7c23231a
STXVLL VS1, R3, R4 // 7c23235a
STXVX VS1, R3, R4 // 7c232318
STXSDX VS1, (R3)(R4) // 7c241d98
LXSIWAX (R3)(R4), VS1 // 7c241898
STXSIWX VS1, (R3)(R4) // 7c241918


@ -17,7 +17,6 @@ import (
var (
Debug = flag.Bool("debug", false, "dump instructions as they are parsed")
OutputFile = flag.String("o", "", "output file; default foo.o for /a/b/c/foo.s as first argument")
PrintOut = flag.Bool("S", false, "print assembly and machine code")
TrimPath = flag.String("trimpath", "", "remove prefix from recorded source file paths")
Shared = flag.Bool("shared", false, "generate code that can be linked into a shared library")
Dynlink = flag.Bool("dynlink", false, "support references to Go symbols defined in other shared libraries")
@ -25,19 +24,19 @@ var (
SymABIs = flag.Bool("gensymabis", false, "write symbol ABI information to output file, don't assemble")
Importpath = flag.String("p", "", "set expected package import to path")
Spectre = flag.String("spectre", "", "enable spectre mitigations in `list` (all, ret)")
Go115Newobj = flag.Bool("go115newobj", true, "use new object file format")
)
var (
D MultiFlag
I MultiFlag
PrintOut int
)
func init() {
flag.Var(&D, "D", "predefined symbol with optional simple value -D=identifier=value; can be set multiple times")
flag.Var(&I, "I", "include directory; can be set multiple times")
objabi.AddVersionFlag() // -V
objabi.Flagcount("S", "print assembly and machine code", &PrintOut)
}
// MultiFlag allows setting a value multiple times to collect a list, as in -I=dir1 -I=dir2.


@ -35,13 +35,11 @@ func main() {
flags.Parse()
ctxt := obj.Linknew(architecture.LinkArch)
if *flags.PrintOut {
ctxt.Debugasm = 1
}
ctxt.Debugasm = flags.PrintOut
ctxt.Flag_dynlink = *flags.Dynlink
ctxt.Flag_shared = *flags.Shared || *flags.Dynlink
ctxt.Flag_go115newobj = *flags.Go115Newobj
ctxt.IsAsm = true
ctxt.Pkgpath = *flags.Importpath
switch *flags.Spectre {
default:
log.Printf("unknown setting -spectre=%s", *flags.Spectre)
@ -97,8 +95,8 @@ func main() {
}
}
if ok && !*flags.SymABIs {
ctxt.NumberSyms(true)
obj.WriteObjFile(ctxt, buf, "")
ctxt.NumberSyms()
obj.WriteObjFile(ctxt, buf)
}
if !ok || diag {
if failedFile != "" {


@ -112,6 +112,13 @@ The default C and C++ compilers may be changed by the CC and CXX
environment variables, respectively; those environment variables
may include command line options.
The cgo tool will always invoke the C compiler with the source file's
directory in the include path; i.e. -I${SRCDIR} is always implied. This
means that if a header file foo/bar.h exists both in the source
directory and also in the system include directory (or some other place
specified by a -I flag), then "#include <foo/bar.h>" will always find the
local version in preference to any other version.
The cgo tool is enabled by default for native builds on systems where
it is expected to work. It is disabled by default when
cross-compiling. You can control this by setting the CGO_ENABLED


@ -298,7 +298,7 @@ func (p *Package) guessKinds(f *File) []*Name {
continue
}
if goos == "darwin" && strings.HasSuffix(n.C, "Ref") {
if (goos == "darwin" || goos == "ios") && strings.HasSuffix(n.C, "Ref") {
// For FooRef, find out if FooGetTypeID exists.
s := n.C[:len(n.C)-3] + "GetTypeID"
n := &Name{Go: s, C: s}
@ -369,7 +369,18 @@ func (p *Package) guessKinds(f *File) []*Name {
fmt.Fprintf(&b, "#line 1 \"completed\"\n"+
"int __cgo__1 = __cgo__2;\n")
stderr := p.gccErrors(b.Bytes())
// We need to parse the output from this gcc command, so ensure that it
// doesn't have any ANSI escape sequences in it. (TERM=dumb is
// insufficient; if the user specifies CGO_CFLAGS=-fdiagnostics-color,
// GCC will ignore TERM, and GCC can also be configured at compile-time
// to ignore TERM.)
stderr := p.gccErrors(b.Bytes(), "-fdiagnostics-color=never")
if strings.Contains(stderr, "unrecognized command line option") {
// We're using an old version of GCC that doesn't understand
// -fdiagnostics-color. Those versions can't print color anyway,
// so just rerun without that option.
stderr = p.gccErrors(b.Bytes())
}
if stderr == "" {
fatalf("%s produced no output\non input:\n%s", p.gccBaseCmd()[0], b.Bytes())
}
@ -1970,22 +1981,25 @@ func (p *Package) gccDefines(stdin []byte) string {
// gccErrors runs gcc over the C program stdin and returns
// the errors that gcc prints. That is, this function expects
// gcc to fail.
func (p *Package) gccErrors(stdin []byte) string {
func (p *Package) gccErrors(stdin []byte, extraArgs ...string) string {
// TODO(rsc): require failure
args := p.gccCmd()
// Optimization options can confuse the error messages; remove them.
nargs := make([]string, 0, len(args))
nargs := make([]string, 0, len(args)+len(extraArgs))
for _, arg := range args {
if !strings.HasPrefix(arg, "-O") {
nargs = append(nargs, arg)
}
}
// Force -O0 optimization but keep the trailing "-" at the end.
nargs = append(nargs, "-O0")
nl := len(nargs)
nargs[nl-2], nargs[nl-1] = nargs[nl-1], nargs[nl-2]
// Force -O0 optimization and append extra arguments, but keep the
// trailing "-" at the end.
li := len(nargs) - 1
last := nargs[li]
nargs[li] = "-O0"
nargs = append(nargs, extraArgs...)
nargs = append(nargs, last)
if *debugGcc {
fmt.Fprintf(os.Stderr, "$ %s <<EOF\n", strings.Join(nargs, " "))
@ -2434,6 +2448,18 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
tt := *t
tt.C = &TypeRepr{"%s %s", []interface{}{dt.Kind, tag}}
tt.Go = c.Ident("struct{}")
if dt.Kind == "struct" {
// We don't know what the representation of this struct is, so don't let
// anyone allocate one on the Go side. As a side effect of this annotation,
// pointers to this type will not be considered pointers in Go. They won't
// get writebarrier-ed or adjusted during a stack copy. This should handle
// all the cases badPointerTypedef used to handle, but hopefully will
// continue to work going forward without any more need for cgo changes.
tt.NotInHeap = true
// TODO: we should probably do the same for unions. Unions can't live
// on the Go heap, right? It currently doesn't work for unions because
// they are defined as a type alias for struct{}, not a defined type.
}
typedef[name.Name] = &tt
break
}
@ -2504,6 +2530,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
}
t.Go = name
t.BadPointer = sub.BadPointer
t.NotInHeap = sub.NotInHeap
if unionWithPointer[sub.Go] {
unionWithPointer[t.Go] = true
}
@ -2514,6 +2541,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
tt := *t
tt.Go = sub.Go
tt.BadPointer = sub.BadPointer
tt.NotInHeap = sub.NotInHeap
typedef[name.Name] = &tt
}
@ -2817,22 +2845,12 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct
tgo := t.Go
size := t.Size
talign := t.Align
if f.BitSize > 0 {
switch f.BitSize {
case 8, 16, 32, 64:
default:
if f.BitOffset > 0 || f.BitSize > 0 {
// The layout of bitfields is implementation defined,
// so we don't know how they correspond to Go fields
// even if they are aligned at byte boundaries.
continue
}
size = f.BitSize / 8
name := tgo.(*ast.Ident).String()
if strings.HasPrefix(name, "int") {
name = "int"
} else {
name = "uint"
}
tgo = ast.NewIdent(name + fmt.Sprint(f.BitSize))
talign = size
}
if talign > 0 && f.ByteOffset%talign != 0 {
// Drop misaligned fields, the same way we drop integer bit fields.
@ -3022,6 +3040,7 @@ func (c *typeConv) anonymousStructTypedef(dt *dwarf.TypedefType) bool {
// non-pointers in this type.
// TODO: Currently our best solution is to find these manually and list them as
// they come up. A better solution is desired.
// Note: DEPRECATED. There is now a better solution. Search for NotInHeap in this file.
func (c *typeConv) badPointerTypedef(dt *dwarf.TypedefType) bool {
if c.badCFType(dt) {
return true
@ -3056,7 +3075,7 @@ func (c *typeConv) badCFType(dt *dwarf.TypedefType) bool {
// We identify the correct set of types as those ending in Ref and for which
// there exists a corresponding GetTypeID function.
// See comment below for details about the bad pointers.
if goos != "darwin" {
if goos != "darwin" && goos != "ios" {
return false
}
s := dt.Name


@ -151,7 +151,8 @@ type Type struct {
Go ast.Expr
EnumValues map[string]int64
Typedef string
BadPointer bool
BadPointer bool // this pointer type should be represented as a uintptr (deprecated)
NotInHeap bool // this type should have a go:notinheap annotation
}
// A FuncType collects information about a function type in both the C and Go worlds.


@ -108,6 +108,9 @@ func (p *Package) writeDefs() {
sort.Strings(typedefNames)
for _, name := range typedefNames {
def := typedef[name]
if def.NotInHeap {
fmt.Fprintf(fgo2, "//go:notinheap\n")
}
fmt.Fprintf(fgo2, "type %s ", name)
// We don't have source info for these types, so write them out without source info.
// Otherwise types would look like:
@ -123,7 +126,9 @@ func (p *Package) writeDefs() {
// Moreover, empty file name makes compile emit no source debug info at all.
var buf bytes.Buffer
noSourceConf.Fprint(&buf, fset, def.Go)
if bytes.HasPrefix(buf.Bytes(), []byte("_Ctype_")) {
if bytes.HasPrefix(buf.Bytes(), []byte("_Ctype_")) ||
strings.HasPrefix(name, "_Ctype_enum_") ||
strings.HasPrefix(name, "_Ctype_union_") {
// This typedef is of the form `typedef a b` and should be an alias.
fmt.Fprintf(fgo2, "= ")
}
@ -241,6 +246,7 @@ func (p *Package) writeDefs() {
if err != nil {
fatalf("%s", err)
}
defer fgcch.Close()
_, err = io.Copy(fexp, fgcch)
if err != nil {
fatalf("%s", err)


@ -319,8 +319,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// TODO(khr): issue only the -1 fixup code we need.
// For instance, if only the quotient is used, no point in zeroing the remainder.
j1.To.Val = n1
j2.To.Val = s.Pc()
j1.To.SetTarget(n1)
j2.To.SetTarget(s.Pc())
}
case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU:
@ -874,7 +874,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpAMD64SUBLloadidx1, ssa.OpAMD64SUBLloadidx4, ssa.OpAMD64SUBLloadidx8, ssa.OpAMD64SUBQloadidx1, ssa.OpAMD64SUBQloadidx8,
ssa.OpAMD64ANDLloadidx1, ssa.OpAMD64ANDLloadidx4, ssa.OpAMD64ANDLloadidx8, ssa.OpAMD64ANDQloadidx1, ssa.OpAMD64ANDQloadidx8,
ssa.OpAMD64ORLloadidx1, ssa.OpAMD64ORLloadidx4, ssa.OpAMD64ORLloadidx8, ssa.OpAMD64ORQloadidx1, ssa.OpAMD64ORQloadidx8,
ssa.OpAMD64XORLloadidx1, ssa.OpAMD64XORLloadidx4, ssa.OpAMD64XORLloadidx8, ssa.OpAMD64XORQloadidx1, ssa.OpAMD64XORQloadidx8:
ssa.OpAMD64XORLloadidx1, ssa.OpAMD64XORLloadidx4, ssa.OpAMD64XORLloadidx8, ssa.OpAMD64XORQloadidx1, ssa.OpAMD64XORQloadidx8,
ssa.OpAMD64ADDSSloadidx1, ssa.OpAMD64ADDSSloadidx4, ssa.OpAMD64ADDSDloadidx1, ssa.OpAMD64ADDSDloadidx8,
ssa.OpAMD64SUBSSloadidx1, ssa.OpAMD64SUBSSloadidx4, ssa.OpAMD64SUBSDloadidx1, ssa.OpAMD64SUBSDloadidx8,
ssa.OpAMD64MULSSloadidx1, ssa.OpAMD64MULSSloadidx4, ssa.OpAMD64MULSDloadidx1, ssa.OpAMD64MULSDloadidx8,
ssa.OpAMD64DIVSSloadidx1, ssa.OpAMD64DIVSSloadidx4, ssa.OpAMD64DIVSDloadidx1, ssa.OpAMD64DIVSDloadidx8:
p := s.Prog(v.Op.Asm())
r, i := v.Args[1].Reg(), v.Args[2].Reg()


@ -11,7 +11,7 @@ import (
"cmd/internal/objabi"
)
var darwin = objabi.GOOS == "darwin"
var darwin = objabi.GOOS == "darwin" || objabi.GOOS == "ios"
func padframe(frame int64) int64 {
// arm64 requires that the frame size (not counting saved FP&LR)


@ -816,7 +816,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p.From.Reg = condBits[v.Aux.(ssa.Op)]
p.From.Reg = condBits[ssa.Op(v.AuxInt)]
p.Reg = v.Args[0].Reg()
p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r1})
p.To.Type = obj.TYPE_REG


@ -392,7 +392,7 @@ func genhash(t *types.Type) *obj.LSym {
}
fn.Func.SetNilCheckDisabled(true)
funccompile(fn)
xtop = append(xtop, fn)
// Build closure. It doesn't close over any variables, so
// it contains just the function pointer.
@ -429,8 +429,7 @@ func hashfor(t *types.Type) *Node {
}
n := newname(sym)
n.SetClass(PFUNC)
n.Sym.SetFunc(true)
setNodeNameFunc(n)
n.Type = functype(nil, []*Node{
anonfield(types.NewPtr(t)),
anonfield(types.Types[TUINTPTR]),
@ -646,17 +645,11 @@ func geneq(t *types.Type) *obj.LSym {
// Build a list of conditions to satisfy.
// The conditions are a list-of-lists. Conditions are reorderable
// within each inner list. The outer lists must be evaluated in order.
// Even within each inner list, track their order so that we can preserve
// aspects of that order. (TODO: latter part needed?)
type nodeIdx struct {
n *Node
idx int
}
var conds [][]nodeIdx
conds = append(conds, []nodeIdx{})
var conds [][]*Node
conds = append(conds, []*Node{})
and := func(n *Node) {
i := len(conds) - 1
conds[i] = append(conds[i], nodeIdx{n: n, idx: len(conds[i])})
conds[i] = append(conds[i], n)
}
// Walk the struct using memequal for runs of AMEM
@ -674,7 +667,7 @@ func geneq(t *types.Type) *obj.LSym {
if !IsRegularMemory(f.Type) {
if EqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions.
conds = append(conds, []nodeIdx{})
conds = append(conds, []*Node{})
}
p := nodSym(OXDOT, np, f.Sym)
q := nodSym(OXDOT, nq, f.Sym)
@ -688,7 +681,7 @@ func geneq(t *types.Type) *obj.LSym {
}
if EqCanPanic(f.Type) {
// Also enforce ordering after something that can panic.
conds = append(conds, []nodeIdx{})
conds = append(conds, []*Node{})
}
i++
continue
@ -713,14 +706,13 @@ func geneq(t *types.Type) *obj.LSym {
// Sort conditions to put runtime calls last.
// Preserve the rest of the ordering.
var flatConds []nodeIdx
var flatConds []*Node
for _, c := range conds {
sort.SliceStable(c, func(i, j int) bool {
x, y := c[i], c[j]
if (x.n.Op != OCALL) == (y.n.Op != OCALL) {
return x.idx < y.idx
isCall := func(n *Node) bool {
return n.Op == OCALL || n.Op == OCALLFUNC
}
return x.n.Op != OCALL
sort.SliceStable(c, func(i, j int) bool {
return !isCall(c[i]) && isCall(c[j])
})
flatConds = append(flatConds, c...)
}
@ -729,9 +721,9 @@ func geneq(t *types.Type) *obj.LSym {
if len(flatConds) == 0 {
cond = nodbool(true)
} else {
cond = flatConds[0].n
cond = flatConds[0]
for _, c := range flatConds[1:] {
cond = nod(OANDAND, cond, c.n)
cond = nod(OANDAND, cond, c)
}
}
@ -762,7 +754,7 @@ func geneq(t *types.Type) *obj.LSym {
// neither of which can be nil, and our comparisons
// are shallow.
fn.Func.SetNilCheckDisabled(true)
funccompile(fn)
xtop = append(xtop, fn)
// Generate a closure which points at the function we just generated.
dsymptr(closure, 0, sym.Linksym(), 0)


@ -64,69 +64,68 @@ var runtimeDecls = [...]struct {
{"stringtoslicebyte", funcTag, 49},
{"stringtoslicerune", funcTag, 52},
{"slicecopy", funcTag, 53},
{"slicestringcopy", funcTag, 54},
{"decoderune", funcTag, 55},
{"countrunes", funcTag, 56},
{"convI2I", funcTag, 57},
{"convT16", funcTag, 58},
{"convT32", funcTag, 58},
{"convT64", funcTag, 58},
{"convTstring", funcTag, 58},
{"convTslice", funcTag, 58},
{"convT2E", funcTag, 59},
{"convT2Enoptr", funcTag, 59},
{"convT2I", funcTag, 59},
{"convT2Inoptr", funcTag, 59},
{"assertE2I", funcTag, 57},
{"assertE2I2", funcTag, 60},
{"assertI2I", funcTag, 57},
{"assertI2I2", funcTag, 60},
{"panicdottypeE", funcTag, 61},
{"panicdottypeI", funcTag, 61},
{"panicnildottype", funcTag, 62},
{"ifaceeq", funcTag, 64},
{"efaceeq", funcTag, 64},
{"fastrand", funcTag, 66},
{"makemap64", funcTag, 68},
{"makemap", funcTag, 69},
{"makemap_small", funcTag, 70},
{"mapaccess1", funcTag, 71},
{"mapaccess1_fast32", funcTag, 72},
{"mapaccess1_fast64", funcTag, 72},
{"mapaccess1_faststr", funcTag, 72},
{"mapaccess1_fat", funcTag, 73},
{"mapaccess2", funcTag, 74},
{"mapaccess2_fast32", funcTag, 75},
{"mapaccess2_fast64", funcTag, 75},
{"mapaccess2_faststr", funcTag, 75},
{"mapaccess2_fat", funcTag, 76},
{"mapassign", funcTag, 71},
{"mapassign_fast32", funcTag, 72},
{"mapassign_fast32ptr", funcTag, 72},
{"mapassign_fast64", funcTag, 72},
{"mapassign_fast64ptr", funcTag, 72},
{"mapassign_faststr", funcTag, 72},
{"mapiterinit", funcTag, 77},
{"mapdelete", funcTag, 77},
{"mapdelete_fast32", funcTag, 78},
{"mapdelete_fast64", funcTag, 78},
{"mapdelete_faststr", funcTag, 78},
{"mapiternext", funcTag, 79},
{"mapclear", funcTag, 80},
{"makechan64", funcTag, 82},
{"makechan", funcTag, 83},
{"chanrecv1", funcTag, 85},
{"chanrecv2", funcTag, 86},
{"chansend1", funcTag, 88},
{"decoderune", funcTag, 54},
{"countrunes", funcTag, 55},
{"convI2I", funcTag, 56},
{"convT16", funcTag, 57},
{"convT32", funcTag, 57},
{"convT64", funcTag, 57},
{"convTstring", funcTag, 57},
{"convTslice", funcTag, 57},
{"convT2E", funcTag, 58},
{"convT2Enoptr", funcTag, 58},
{"convT2I", funcTag, 58},
{"convT2Inoptr", funcTag, 58},
{"assertE2I", funcTag, 56},
{"assertE2I2", funcTag, 59},
{"assertI2I", funcTag, 56},
{"assertI2I2", funcTag, 59},
{"panicdottypeE", funcTag, 60},
{"panicdottypeI", funcTag, 60},
{"panicnildottype", funcTag, 61},
{"ifaceeq", funcTag, 63},
{"efaceeq", funcTag, 63},
{"fastrand", funcTag, 65},
{"makemap64", funcTag, 67},
{"makemap", funcTag, 68},
{"makemap_small", funcTag, 69},
{"mapaccess1", funcTag, 70},
{"mapaccess1_fast32", funcTag, 71},
{"mapaccess1_fast64", funcTag, 71},
{"mapaccess1_faststr", funcTag, 71},
{"mapaccess1_fat", funcTag, 72},
{"mapaccess2", funcTag, 73},
{"mapaccess2_fast32", funcTag, 74},
{"mapaccess2_fast64", funcTag, 74},
{"mapaccess2_faststr", funcTag, 74},
{"mapaccess2_fat", funcTag, 75},
{"mapassign", funcTag, 70},
{"mapassign_fast32", funcTag, 71},
{"mapassign_fast32ptr", funcTag, 71},
{"mapassign_fast64", funcTag, 71},
{"mapassign_fast64ptr", funcTag, 71},
{"mapassign_faststr", funcTag, 71},
{"mapiterinit", funcTag, 76},
{"mapdelete", funcTag, 76},
{"mapdelete_fast32", funcTag, 77},
{"mapdelete_fast64", funcTag, 77},
{"mapdelete_faststr", funcTag, 77},
{"mapiternext", funcTag, 78},
{"mapclear", funcTag, 79},
{"makechan64", funcTag, 81},
{"makechan", funcTag, 82},
{"chanrecv1", funcTag, 84},
{"chanrecv2", funcTag, 85},
{"chansend1", funcTag, 87},
{"closechan", funcTag, 30},
{"writeBarrier", varTag, 90},
{"typedmemmove", funcTag, 91},
{"typedmemclr", funcTag, 92},
{"typedslicecopy", funcTag, 93},
{"selectnbsend", funcTag, 94},
{"selectnbrecv", funcTag, 95},
{"selectnbrecv2", funcTag, 97},
{"selectsetpc", funcTag, 62},
{"writeBarrier", varTag, 89},
{"typedmemmove", funcTag, 90},
{"typedmemclr", funcTag, 91},
{"typedslicecopy", funcTag, 92},
{"selectnbsend", funcTag, 93},
{"selectnbrecv", funcTag, 94},
{"selectnbrecv2", funcTag, 96},
{"selectsetpc", funcTag, 97},
{"selectgo", funcTag, 98},
{"block", funcTag, 9},
{"makeslice", funcTag, 99},
@ -257,51 +256,51 @@ func runtimeTypes() []*types.Type {
typs[51] = types.NewPtr(typs[50])
typs[52] = functype(nil, []*Node{anonfield(typs[51]), anonfield(typs[28])}, []*Node{anonfield(typs[46])})
typs[53] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*Node{anonfield(typs[15])})
typs[54] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[55] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[45]), anonfield(typs[15])})
typs[56] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[57] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
typs[58] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
typs[62] = functype(nil, []*Node{anonfield(typs[1])}, nil)
typs[63] = types.NewPtr(typs[5])
typs[64] = functype(nil, []*Node{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[65] = types.Types[TUINT32]
typs[66] = functype(nil, nil, []*Node{anonfield(typs[65])})
typs[67] = types.NewMap(typs[2], typs[2])
typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[67])})
typs[70] = functype(nil, nil, []*Node{anonfield(typs[67])})
typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
typs[79] = functype(nil, []*Node{anonfield(typs[3])}, nil)
typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[67])}, nil)
typs[81] = types.NewChan(typs[2], types.Cboth)
typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[81])})
typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[81])})
typs[84] = types.NewChan(typs[2], types.Crecv)
typs[85] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, nil)
typs[86] = functype(nil, []*Node{anonfield(typs[84]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[87] = types.NewChan(typs[2], types.Csend)
typs[88] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, nil)
typs[89] = types.NewArray(typs[0], 3)
typs[90] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
typs[94] = functype(nil, []*Node{anonfield(typs[87]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
typs[96] = types.NewPtr(typs[6])
typs[97] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*Node{anonfield(typs[6])})
typs[98] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
typs[54] = functype(nil, []*Node{anonfield(typs[28]), anonfield(typs[15])}, []*Node{anonfield(typs[45]), anonfield(typs[15])})
typs[55] = functype(nil, []*Node{anonfield(typs[28])}, []*Node{anonfield(typs[15])})
typs[56] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])})
typs[57] = functype(nil, []*Node{anonfield(typs[2])}, []*Node{anonfield(typs[7])})
typs[58] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])})
typs[59] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[6])})
typs[60] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
typs[61] = functype(nil, []*Node{anonfield(typs[1])}, nil)
typs[62] = types.NewPtr(typs[5])
typs[63] = functype(nil, []*Node{anonfield(typs[62]), anonfield(typs[7]), anonfield(typs[7])}, []*Node{anonfield(typs[6])})
typs[64] = types.Types[TUINT32]
typs[65] = functype(nil, nil, []*Node{anonfield(typs[64])})
typs[66] = types.NewMap(typs[2], typs[2])
typs[67] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*Node{anonfield(typs[66])})
typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[66])})
typs[69] = functype(nil, nil, []*Node{anonfield(typs[66])})
typs[70] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, []*Node{anonfield(typs[3])})
typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, []*Node{anonfield(typs[3])})
typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])})
typs[73] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[74] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[6])})
typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[3])}, nil)
typs[77] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66]), anonfield(typs[2])}, nil)
typs[78] = functype(nil, []*Node{anonfield(typs[3])}, nil)
typs[79] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[66])}, nil)
typs[80] = types.NewChan(typs[2], types.Cboth)
typs[81] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22])}, []*Node{anonfield(typs[80])})
typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[80])})
typs[83] = types.NewChan(typs[2], types.Crecv)
typs[84] = functype(nil, []*Node{anonfield(typs[83]), anonfield(typs[3])}, nil)
typs[85] = functype(nil, []*Node{anonfield(typs[83]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[86] = types.NewChan(typs[2], types.Csend)
typs[87] = functype(nil, []*Node{anonfield(typs[86]), anonfield(typs[3])}, nil)
typs[88] = types.NewArray(typs[0], 3)
typs[89] = tostruct([]*Node{namedfield("enabled", typs[6]), namedfield("pad", typs[88]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
typs[90] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*Node{anonfield(typs[15])})
typs[93] = functype(nil, []*Node{anonfield(typs[86]), anonfield(typs[3])}, []*Node{anonfield(typs[6])})
typs[94] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[83])}, []*Node{anonfield(typs[6])})
typs[95] = types.NewPtr(typs[6])
typs[96] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[95]), anonfield(typs[83])}, []*Node{anonfield(typs[6])})
typs[97] = functype(nil, []*Node{anonfield(typs[62])}, nil)
typs[98] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[62]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*Node{anonfield(typs[15]), anonfield(typs[6])})
typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[7])})
typs[100] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*Node{anonfield(typs[7])})
typs[101] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*Node{anonfield(typs[7])})
@ -318,10 +317,10 @@ func runtimeTypes() []*types.Type {
typs[112] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, []*Node{anonfield(typs[24])})
typs[113] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[22])})
typs[114] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[24])})
typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[65])})
typs[115] = functype(nil, []*Node{anonfield(typs[20])}, []*Node{anonfield(typs[64])})
typs[116] = functype(nil, []*Node{anonfield(typs[22])}, []*Node{anonfield(typs[20])})
typs[117] = functype(nil, []*Node{anonfield(typs[24])}, []*Node{anonfield(typs[20])})
typs[118] = functype(nil, []*Node{anonfield(typs[65])}, []*Node{anonfield(typs[20])})
typs[118] = functype(nil, []*Node{anonfield(typs[64])}, []*Node{anonfield(typs[20])})
typs[119] = functype(nil, []*Node{anonfield(typs[26]), anonfield(typs[26])}, []*Node{anonfield(typs[26])})
typs[120] = functype(nil, []*Node{anonfield(typs[5])}, nil)
typs[121] = functype(nil, []*Node{anonfield(typs[5]), anonfield(typs[5])}, nil)
@ -332,7 +331,7 @@ func runtimeTypes() []*types.Type {
typs[126] = functype(nil, []*Node{anonfield(typs[125]), anonfield(typs[125])}, nil)
typs[127] = types.Types[TUINT16]
typs[128] = functype(nil, []*Node{anonfield(typs[127]), anonfield(typs[127])}, nil)
typs[129] = functype(nil, []*Node{anonfield(typs[65]), anonfield(typs[65])}, nil)
typs[129] = functype(nil, []*Node{anonfield(typs[64]), anonfield(typs[64])}, nil)
typs[130] = functype(nil, []*Node{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}


@ -75,8 +75,7 @@ func slicebytetostringtmp(ptr *byte, n int) string
func slicerunetostring(*[32]byte, []rune) string
func stringtoslicebyte(*[32]byte, string) []byte
func stringtoslicerune(*[32]rune, string) []rune
func slicecopy(toPtr *any, toLen int, frPtr *any, frLen int, wid uintptr) int
func slicestringcopy(toPtr *byte, toLen int, fr string) int
func slicecopy(toPtr *any, toLen int, fromPtr *any, fromLen int, wid uintptr) int
func decoderune(string, int) (retv rune, retk int)
func countrunes(string) int
@ -169,8 +168,8 @@ func selectnbsend(hchan chan<- any, elem *any) bool
func selectnbrecv(elem *any, hchan <-chan any) bool
func selectnbrecv2(elem *any, received *bool, hchan <-chan any) bool
func selectsetpc(cas *byte)
func selectgo(cas0 *byte, order0 *byte, ncases int) (int, bool)
func selectsetpc(pc *uintptr)
func selectgo(cas0 *byte, order0 *byte, pc0 *uintptr, nsends int, nrecvs int, block bool) (int, bool)
func block()
func makeslice(typ *byte, len int, cap int) unsafe.Pointer


@ -107,8 +107,7 @@ func typecheckclosure(clo *Node, top int) {
}
xfunc.Func.Nname.Sym = closurename(Curfn)
disableExport(xfunc.Func.Nname.Sym)
declare(xfunc.Func.Nname, PFUNC)
setNodeNameFunc(xfunc.Func.Nname)
xfunc = typecheck(xfunc, ctxStmt)
// Type check the body now, but only if we're inside a function.
@ -429,6 +428,7 @@ func typecheckpartialcall(fn *Node, sym *types.Sym) {
// Create top-level function.
xfunc := makepartialcall(fn, fn.Type, sym)
fn.Func = xfunc.Func
fn.Func.SetWrapper(true)
fn.Right = newname(sym)
fn.Op = OCALLPART
fn.Type = xfunc.Type
@ -462,7 +462,6 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node {
tfn.List.Set(structargs(t0.Params(), true))
tfn.Rlist.Set(structargs(t0.Results(), false))
disableExport(sym)
xfunc := dclfunc(sym, tfn)
xfunc.Func.SetDupok(true)
xfunc.Func.SetNeedctxt(true)
@ -525,7 +524,7 @@ func walkpartialcall(n *Node, init *Nodes) *Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
// clos = &struct{F uintptr; R T}{M.T·f, x}
// clos = &struct{F uintptr; R T}{T.M·f, x}
//
// Like walkclosure above.


@ -44,7 +44,7 @@ func (v Val) Ctype() Ctype {
Fatalf("unexpected Ctype for %T", v.U)
panic("unreachable")
case nil:
return 0
return CTxxx
case *NilVal:
return CTNIL
case bool:
@ -261,7 +261,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, context func() string) *Nod
}
if t == nil || !okforconst[t.Etype] {
t = defaultType(idealkind(n))
t = defaultType(n.Type)
}
switch n.Op {
@ -838,10 +838,6 @@ Outer:
return Val{}
}
u.Quo(y)
case OMOD, OOR, OAND, OANDNOT, OXOR:
// TODO(mdempsky): Move to typecheck; see #31060.
yyerror("invalid operation: operator %v not defined on untyped float", op)
return Val{}
default:
break Outer
}
@ -867,10 +863,6 @@ Outer:
yyerror("complex division by zero")
return Val{}
}
case OMOD, OOR, OAND, OANDNOT, OXOR:
// TODO(mdempsky): Move to typecheck; see #31060.
yyerror("invalid operation: operator %v not defined on untyped complex", op)
return Val{}
default:
break Outer
}
@ -932,15 +924,6 @@ func unaryOp(op Op, x Val, t *types.Type) Val {
}
u.Xor(x)
return Val{U: u}
case CTFLT:
// TODO(mdempsky): Move to typecheck; see #31060.
yyerror("invalid operation: operator %v not defined on untyped float", op)
return Val{}
case CTCPLX:
// TODO(mdempsky): Move to typecheck; see #31060.
yyerror("invalid operation: operator %v not defined on untyped complex", op)
return Val{}
}
case ONOT:
@ -994,10 +977,8 @@ func setconst(n *Node, v Val) {
Xoffset: BADWIDTH,
}
n.SetVal(v)
if n.Type.IsUntyped() {
// TODO(mdempsky): Make typecheck responsible for setting
// the correct untyped type.
n.Type = idealType(v.Ctype())
if vt := idealType(v.Ctype()); n.Type.IsUntyped() && n.Type != vt {
Fatalf("untyped type mismatch, have: %v, want: %v", n.Type, vt)
}
// Check range.
@ -1056,67 +1037,6 @@ func idealType(ct Ctype) *types.Type {
return nil
}
// idealkind returns a constant kind like consttype
// but for an arbitrary "ideal" (untyped constant) expression.
func idealkind(n *Node) Ctype {
if n == nil || !n.Type.IsUntyped() {
return CTxxx
}
switch n.Op {
default:
return CTxxx
case OLITERAL:
return n.Val().Ctype()
// numeric kinds.
case OADD,
OAND,
OANDNOT,
OBITNOT,
ODIV,
ONEG,
OMOD,
OMUL,
OSUB,
OXOR,
OOR,
OPLUS:
k1 := idealkind(n.Left)
k2 := idealkind(n.Right)
if k1 > k2 {
return k1
} else {
return k2
}
case OREAL, OIMAG:
return CTFLT
case OCOMPLEX:
return CTCPLX
case OADDSTR:
return CTSTR
case OANDAND,
OEQ,
OGE,
OGT,
OLE,
OLT,
ONE,
ONOT,
OOROR:
return CTBOOL
// shifts (beware!).
case OLSH, ORSH:
return idealkind(n.Left)
}
}
// defaultlit on both nodes simultaneously;
// if they're both ideal going in they better
// get the same type going out.
@ -1152,32 +1072,60 @@ func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) {
return l, r
}
k := idealkind(l)
if rk := idealkind(r); rk > k {
k = rk
}
t := defaultType(k)
t := defaultType(mixUntyped(l.Type, r.Type))
l = convlit(l, t)
r = convlit(r, t)
return l, r
}
func defaultType(k Ctype) *types.Type {
switch k {
case CTBOOL:
func ctype(t *types.Type) Ctype {
switch t {
case types.Idealbool:
return CTBOOL
case types.Idealstring:
return CTSTR
case types.Idealint:
return CTINT
case types.Idealrune:
return CTRUNE
case types.Idealfloat:
return CTFLT
case types.Idealcomplex:
return CTCPLX
}
Fatalf("bad type %v", t)
panic("unreachable")
}
func mixUntyped(t1, t2 *types.Type) *types.Type {
t := t1
if ctype(t2) > ctype(t1) {
t = t2
}
return t
}
func defaultType(t *types.Type) *types.Type {
if !t.IsUntyped() || t.Etype == TNIL {
return t
}
switch t {
case types.Idealbool:
return types.Types[TBOOL]
case CTSTR:
case types.Idealstring:
return types.Types[TSTRING]
case CTINT:
case types.Idealint:
return types.Types[TINT]
case CTRUNE:
case types.Idealrune:
return types.Runetype
case CTFLT:
case types.Idealfloat:
return types.Types[TFLOAT64]
case CTCPLX:
case types.Idealcomplex:
return types.Types[TCOMPLEX128]
}
Fatalf("bad idealkind: %v", k)
Fatalf("bad type %v", t)
return nil
}
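mixUntyped and defaultType above pick the "larger" untyped kind of two operands and map it to a concrete default type. The effect matches what the Go spec prescribes for mixed untyped constants; a small sketch of the observable behavior (plain user code, nothing beyond the standard library):

package main

import "fmt"

func main() {
	a := 1 + 2.5 // untyped int mixed with untyped float -> float64
	b := 'x' - 1 // untyped rune mixed with untyped int -> rune (int32)
	c := 2 + 3i  // untyped int mixed with untyped complex -> complex128
	d := 1 << 3  // shifts keep the left operand's kind -> int
	fmt.Printf("%T %T %T %T\n", a, b, c, d) // float64 int32 complex128 int
}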

View file

@ -90,7 +90,7 @@ func declare(n *Node, ctxt Class) {
lineno = n.Pos
Fatalf("automatic outside function")
}
if Curfn != nil {
if Curfn != nil && ctxt != PFUNC {
Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
}
if n.Op == OTYPE {
@ -297,6 +297,16 @@ func oldname(s *types.Sym) *Node {
return n
}
// importName is like oldname, but it reports an error if sym is from another package and not exported.
func importName(sym *types.Sym) *Node {
n := oldname(sym)
if !types.IsExported(sym.Name) && sym.Pkg != localpkg {
n.SetDiag(true)
yyerror("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
}
return n
}
// := declarations
func colasname(n *Node) bool {
switch n.Op {
@ -372,14 +382,11 @@ func ifacedcl(n *Node) {
// returns in auto-declaration context.
func funchdr(n *Node) {
// change the declaration context from the enclosing context to auto
if Curfn == nil && dclcontext != PEXTERN {
Fatalf("funchdr: dclcontext = %d", dclcontext)
}
dclcontext = PAUTO
types.Markdcl()
funcstack = append(funcstack, Curfn)
funcStack = append(funcStack, funcStackEnt{Curfn, dclcontext})
Curfn = n
dclcontext = PAUTO
types.Markdcl()
if n.Func.Nname != nil {
funcargs(n.Func.Nname.Name.Param.Ntype)
@ -487,21 +494,22 @@ func funcarg2(f *types.Field, ctxt Class) {
declare(n, ctxt)
}
var funcstack []*Node // stack of previous values of Curfn
var funcStack []funcStackEnt // stack of previous values of Curfn/dclcontext
type funcStackEnt struct {
curfn *Node
dclcontext Class
}
// finish the body.
// called in auto-declaration context.
// returns in the enclosing declaration context.
func funcbody() {
// change the declaration context from auto to extern
if dclcontext != PAUTO {
Fatalf("funcbody: unexpected dclcontext %d", dclcontext)
}
// change the declaration context from auto to previous context
types.Popdcl()
funcstack, Curfn = funcstack[:len(funcstack)-1], funcstack[len(funcstack)-1]
if Curfn == nil {
dclcontext = PEXTERN
}
var e funcStackEnt
funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
Curfn, dclcontext = e.curfn, e.dclcontext
}
// structs, functions, and methods.
@ -975,10 +983,14 @@ func makefuncsym(s *types.Sym) {
}
}
// disableExport prevents sym from being included in package export
// data. To be effectual, it must be called before declare.
func disableExport(sym *types.Sym) {
sym.SetOnExportList(true)
// setNodeNameFunc marks a node as a function.
func setNodeNameFunc(n *Node) {
if n.Op != ONAME || n.Class() != Pxxx {
Fatalf("expected ONAME/Pxxx node, got %v", n)
}
n.SetClass(PFUNC)
n.Sym.SetFunc(true)
}
func dclfunc(sym *types.Sym, tfn *Node) *Node {
@ -990,7 +1002,7 @@ func dclfunc(sym *types.Sym, tfn *Node) *Node {
fn.Func.Nname = newfuncnamel(lineno, sym)
fn.Func.Nname.Name.Defn = fn
fn.Func.Nname.Name.Param.Ntype = tfn
declare(fn.Func.Nname, PFUNC)
setNodeNameFunc(fn.Func.Nname)
funchdr(fn)
fn.Func.Nname.Name.Param.Ntype = typecheck(fn.Func.Nname.Name.Param.Ntype, ctxType)
return fn
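funcStack now saves Curfn and dclcontext as a pair so funcbody can restore whatever context was in effect, not just PEXTERN. A minimal sketch of the save/restore pattern, with illustrative names rather than the compiler's actual types:

package main

import "fmt"

type ent struct {
	curfn string
	ctx   int
}

var (
	curfn string
	ctx   int
	stack []ent
)

func header(fn string) {
	stack = append(stack, ent{curfn, ctx}) // save the enclosing state
	curfn, ctx = fn, 1                     // enter auto context
}

func body() {
	var e ent
	stack, e = stack[:len(stack)-1], stack[len(stack)-1]
	curfn, ctx = e.curfn, e.ctx // restore whatever was in effect before
}

func main() {
	header("outer")
	header("closure") // nested: the previous context is not extern
	body()
	fmt.Println(curfn, ctx) // outer 1
	body()
	fmt.Println(curfn, ctx) // back to the initial (extern-like) state
}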

View file

@ -187,6 +187,13 @@ func mustHeapAlloc(n *Node) bool {
return true
}
if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
return true
}
if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
return true
}
if n.Op == OMAKESLICE && !isSmallMakeSlice(n) {
return true
}
@ -370,14 +377,14 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
// This really doesn't have much to do with escape analysis per se,
// but we are reusing the ability to annotate an individual function
// argument and pass those annotations along to importing code.
if f.Type.Etype == TUINTPTR {
if f.Type.IsUintptr() {
if Debug['m'] != 0 {
Warnl(f.Pos, "assuming %v is unsafe uintptr", name())
}
return unsafeUintptrTag
}
if !types.Haspointers(f.Type) { // don't bother tagging for scalars
if !f.Type.HasPointers() { // don't bother tagging for scalars
return ""
}
@ -400,13 +407,13 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
}
if fn.Func.Pragma&UintptrEscapes != 0 {
if f.Type.Etype == TUINTPTR {
if f.Type.IsUintptr() {
if Debug['m'] != 0 {
Warnl(f.Pos, "marking %v as escaping uintptr", name())
}
return uintptrEscapesTag
}
if f.IsDDD() && f.Type.Elem().Etype == TUINTPTR {
if f.IsDDD() && f.Type.Elem().IsUintptr() {
// final argument is ...uintptr.
if Debug['m'] != 0 {
Warnl(f.Pos, "marking %v as escaping ...uintptr", name())
@ -415,7 +422,7 @@ func (e *Escape) paramTag(fn *Node, narg int, f *types.Field) string {
}
}
if !types.Haspointers(f.Type) { // don't bother tagging for scalars
if !f.Type.HasPointers() { // don't bother tagging for scalars
return ""
}
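The tagging above is skipped entirely for pointer-free parameter types. A rough user-level analogue of the HasPointers predicate, written with reflect (illustrative only; the compiler computes this on its own type representation, not via reflection):

package main

import (
	"fmt"
	"reflect"
)

// hasPointers reports whether values of type t contain pointers the GC
// would care about.
func hasPointers(t reflect.Type) bool {
	switch t.Kind() {
	case reflect.Ptr, reflect.UnsafePointer, reflect.Map, reflect.Chan,
		reflect.Func, reflect.Interface, reflect.Slice, reflect.String:
		return true
	case reflect.Array:
		return t.Len() > 0 && hasPointers(t.Elem())
	case reflect.Struct:
		for i := 0; i < t.NumField(); i++ {
			if hasPointers(t.Field(i).Type) {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(hasPointers(reflect.TypeOf(struct{ A, B int }{}))) // false
	fmt.Println(hasPointers(reflect.TypeOf(struct{ S []byte }{}))) // true
}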

View file

@ -326,7 +326,7 @@ func (e *Escape) stmt(n *Node) {
if typesw && n.Left.Left != nil {
cv := cas.Rlist.First()
k := e.dcl(cv) // type switch variables have no ODCL.
if types.Haspointers(cv.Type) {
if cv.Type.HasPointers() {
ks = append(ks, k.dotType(cv.Type, cas, "switch case"))
}
}
@ -433,7 +433,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) {
if uintptrEscapesHack && n.Op == OCONVNOP && n.Left.Type.IsUnsafePtr() {
// nop
} else if k.derefs >= 0 && !types.Haspointers(n.Type) {
} else if k.derefs >= 0 && !n.Type.HasPointers() {
k = e.discardHole()
}
@ -485,7 +485,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) {
e.discard(max)
case OCONV, OCONVNOP:
if checkPtr(e.curfn, 2) && n.Type.Etype == TUNSAFEPTR && n.Left.Type.IsPtr() {
if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() {
// When -d=checkptr=2 is enabled, treat
// conversions to unsafe.Pointer as an
// escaping operation. This allows better
@ -493,7 +493,7 @@ func (e *Escape) exprSkipInit(k EscHole, n *Node) {
// easily detect object boundaries on the heap
// than the stack.
e.assignHeap(n.Left, "conversion to unsafe.Pointer", n)
} else if n.Type.Etype == TUNSAFEPTR && n.Left.Type.Etype == TUINTPTR {
} else if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() {
e.unsafeValue(k, n.Left)
} else {
e.expr(k, n.Left)
@ -625,7 +625,7 @@ func (e *Escape) unsafeValue(k EscHole, n *Node) {
switch n.Op {
case OCONV, OCONVNOP:
if n.Left.Type.Etype == TUNSAFEPTR {
if n.Left.Type.IsUnsafePtr() {
e.expr(k, n.Left)
} else {
e.discard(n.Left)
@ -698,7 +698,7 @@ func (e *Escape) addr(n *Node) EscHole {
e.assignHeap(n.Right, "key of map put", n)
}
if !types.Haspointers(n.Type) {
if !n.Type.HasPointers() {
k = e.discardHole()
}
@ -811,14 +811,14 @@ func (e *Escape) call(ks []EscHole, call, where *Node) {
// slice might be allocated, and all slice elements
// might flow to heap.
appendeeK := ks[0]
if types.Haspointers(args[0].Type.Elem()) {
if args[0].Type.Elem().HasPointers() {
appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
}
argument(appendeeK, args[0])
if call.IsDDD() {
appendedK := e.discardHole()
if args[1].Type.IsSlice() && types.Haspointers(args[1].Type.Elem()) {
if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() {
appendedK = e.heapHole().deref(call, "appended slice...")
}
argument(appendedK, args[1])
@ -832,7 +832,7 @@ func (e *Escape) call(ks []EscHole, call, where *Node) {
argument(e.discardHole(), call.Left)
copiedK := e.discardHole()
if call.Right.Type.IsSlice() && types.Haspointers(call.Right.Type.Elem()) {
if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() {
copiedK = e.heapHole().deref(call, "copied slice")
}
argument(copiedK, call.Right)
@ -1029,6 +1029,9 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
if e.curfn == nil {
Fatalf("e.curfn isn't set")
}
if n != nil && n.Type != nil && n.Type.NotInHeap() {
yyerrorl(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type)
}
n = canonicalNode(n)
loc := &EscLocation{

View file

@ -711,6 +711,17 @@ func tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited
return
}
if t.Etype == types.TRESULTS {
tys := t.Extra.(*types.Results).Types
for i, et := range tys {
if i > 0 {
b.WriteByte(',')
}
b.WriteString(et.String())
}
return
}
flag, mode = flag.update(mode)
if mode == FTypeIdName {
flag |= FmtUnsigned
@ -1407,7 +1418,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
return
}
if n.Right != nil {
mode.Fprintf(s, "%v literal", n.Right)
mode.Fprintf(s, "%v{%s}", n.Right, ellipsisIf(n.List.Len() != 0))
return
}
@ -1421,7 +1432,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
if mode == FErr {
mode.Fprintf(s, "%v literal", n.Type)
mode.Fprintf(s, "%v{%s}", n.Type, ellipsisIf(n.List.Len() != 0))
return
}
mode.Fprintf(s, "(%v{ %.v })", n.Type, n.List)
@ -1616,7 +1627,8 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
}
n1.exprfmt(s, nprec, mode)
}
case ODDD:
mode.Fprintf(s, "...")
default:
mode.Fprintf(s, "<node %v>", n.Op)
}
@ -1933,3 +1945,10 @@ func indent(s fmt.State) {
fmt.Fprint(s, ". ")
}
}
func ellipsisIf(b bool) string {
if b {
return "..."
}
return ""
}
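With ellipsisIf, composite literals in error messages render as T{} or T{...} instead of "T literal". A tiny usage sketch of the helper in isolation:

package main

import "fmt"

func ellipsisIf(b bool) string {
	if b {
		return "..."
	}
	return ""
}

func main() {
	fmt.Printf("[]int{%s}\n", ellipsisIf(false)) // []int{}
	fmt.Printf("[]int{%s}\n", ellipsisIf(true))  // []int{...}
}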

View file

@ -32,7 +32,6 @@ package gc
import (
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
@ -316,7 +315,7 @@ func ggloblnod(nam *Node) {
if nam.Name.Readonly() {
flags = obj.RODATA
}
if nam.Type != nil && !types.Haspointers(nam.Type) {
if nam.Type != nil && !nam.Type.HasPointers() {
flags |= obj.NOPTR
}
Ctxt.Globl(s, nam.Type.Width, flags)
@ -343,6 +342,6 @@ func Patch(p *obj.Prog, to *obj.Prog) {
if p.To.Type != obj.TYPE_BRANCH {
Fatalf("patch: not a branch")
}
p.To.Val = to
p.To.SetTarget(to)
p.To.Offset = to.Pc
}

View file

@ -205,8 +205,9 @@ import (
"bufio"
"bytes"
"cmd/compile/internal/types"
"cmd/internal/goobj2"
"cmd/internal/goobj"
"cmd/internal/src"
"crypto/md5"
"encoding/binary"
"fmt"
"io"
@ -295,12 +296,15 @@ func iexport(out *bufio.Writer) {
hdr.uint64(dataLen)
// Flush output.
io.Copy(out, &hdr)
io.Copy(out, &p.strings)
io.Copy(out, &p.data0)
h := md5.New()
wr := io.MultiWriter(out, h)
io.Copy(wr, &hdr)
io.Copy(wr, &p.strings)
io.Copy(wr, &p.data0)
// Add fingerprint (used by linker object file).
// Attach this to the end, so tools (e.g. gcimporter) don't care.
copy(Ctxt.Fingerprint[:], h.Sum(nil)[:])
out.Write(Ctxt.Fingerprint[:])
}
@ -480,6 +484,7 @@ func (p *iexporter) doDecl(n *Node) {
t := n.Type
if t.IsInterface() {
w.typeExt(t)
break
}
@ -492,6 +497,7 @@ func (p *iexporter) doDecl(n *Node) {
w.signature(m.Type)
}
w.typeExt(t)
for _, m := range ms.Slice() {
w.methExt(m)
}
@ -997,9 +1003,8 @@ func (w *exportWriter) linkname(s *types.Sym) {
}
func (w *exportWriter) symIdx(s *types.Sym) {
if Ctxt.Flag_go115newobj {
lsym := s.Linksym()
if lsym.PkgIdx > goobj2.PkgIdxSelf || (lsym.PkgIdx == goobj2.PkgIdxInvalid && !lsym.Indexed()) || s.Linkname != "" {
if lsym.PkgIdx > goobj.PkgIdxSelf || (lsym.PkgIdx == goobj.PkgIdxInvalid && !lsym.Indexed()) || s.Linkname != "" {
// Don't export index for non-package symbols, linkname'd symbols,
// and symbols without an index. They can only be referenced by
// name.
@ -1010,6 +1015,16 @@ func (w *exportWriter) symIdx(s *types.Sym) {
w.int64(int64(lsym.SymIdx))
}
}
func (w *exportWriter) typeExt(t *types.Type) {
// For type T, export the index of type descriptor symbols of T and *T.
if i, ok := typeSymIdx[t]; ok {
w.int64(i[0])
w.int64(i[1])
return
}
w.symIdx(typesym(t))
w.symIdx(typesym(t.PtrTo()))
}
// Inline bodies.
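The export path now hashes exactly the bytes it writes by teeing them through an io.MultiWriter, then appends the digest as a fingerprint. A self-contained sketch of that pattern; the 8-byte truncation mirrors what the fingerprint type appears to use, so treat the size as an assumption:

package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
)

func main() {
	var hdr, strs, data bytes.Buffer // stand-ins for the export sections
	hdr.WriteString("header")
	strs.WriteString("strings")
	data.WriteString("data")

	var out bytes.Buffer
	h := md5.New()
	w := io.MultiWriter(&out, h) // hash exactly what goes out
	io.Copy(w, &hdr)
	io.Copy(w, &strs)
	io.Copy(w, &data)

	var fp [8]byte // assumed fingerprint size
	copy(fp[:], h.Sum(nil))
	out.Write(fp[:])
	fmt.Printf("fingerprint %x, %d bytes total\n", fp, out.Len())
}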

View file

@ -10,7 +10,7 @@ package gc
import (
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/goobj2"
"cmd/internal/goobj"
"cmd/internal/obj"
"cmd/internal/src"
"encoding/binary"
@ -97,7 +97,7 @@ func (r *intReader) uint64() uint64 {
return i
}
func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj2.FingerprintType) {
func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) {
ir := &intReader{in, pkg}
version := ir.uint64()
@ -191,9 +191,9 @@ func iimport(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj2.FingerprintType
}
}
// Fingerprint
n, err := io.ReadFull(in, fingerprint[:])
if err != nil || n != len(fingerprint) {
// Fingerprint.
_, err = io.ReadFull(in, fingerprint[:])
if err != nil {
yyerror("import %s: error reading fingerprint", pkg.Path)
errorexit()
}
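The dropped length check was redundant: io.ReadFull already returns an error unless it fills the buffer completely. A one-screen demonstration:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	var fingerprint [8]byte
	short := bytes.NewReader([]byte{1, 2, 3}) // fewer than 8 bytes
	if _, err := io.ReadFull(short, fingerprint[:]); err != nil {
		fmt.Println(err) // unexpected EOF
	}
}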
@ -316,6 +316,7 @@ func (r *importReader) doDecl(n *Node) {
resumecheckwidth()
if underlying.IsInterface() {
r.typeExt(t)
break
}
@ -346,6 +347,7 @@ func (r *importReader) doDecl(n *Node) {
}
t.Methods().Set(ms)
r.typeExt(t)
for _, m := range ms {
r.methExt(m)
}
@ -697,7 +699,6 @@ func (r *importReader) linkname(s *types.Sym) {
}
func (r *importReader) symIdx(s *types.Sym) {
if Ctxt.Flag_go115newobj {
lsym := s.Linksym()
idx := int32(r.int64())
if idx != -1 {
@ -708,7 +709,17 @@ func (r *importReader) symIdx(s *types.Sym) {
lsym.Set(obj.AttrIndexed, true)
}
}
func (r *importReader) typeExt(t *types.Type) {
i, pi := r.int64(), r.int64()
if i != -1 && pi != -1 {
typeSymIdx[t] = [2]int64{i, pi}
}
}
// Map imported type T to the index of type descriptor symbols of T and *T,
// so we can use the index to reference the symbol.
var typeSymIdx = make(map[*types.Type][2]int64)
func (r *importReader) doInline(n *Node) {
if len(n.Func.Inl.Body) != 0 {

View file

@ -45,7 +45,6 @@ func fninit(n []*Node) {
if len(nf) > 0 {
lineno = nf[0].Pos // prolog/epilog gets line number of first init stmt
initializers := lookup("init")
disableExport(initializers)
fn := dclfunc(initializers, nod(OTFUNC, nil, nil))
for _, dcl := range dummyInitFn.Func.Dcl {
dcl.Name.Curfn = fn
@ -60,7 +59,7 @@ func fninit(n []*Node) {
Curfn = fn
typecheckslice(nf, ctxStmt)
Curfn = nil
funccompile(fn)
xtop = append(xtop, fn)
fns = append(fns, initializers.Linksym())
}
if dummyInitFn.Func.Dcl != nil {
@ -69,16 +68,14 @@ func fninit(n []*Node) {
// something's weird if we get here.
Fatalf("dummyInitFn still has declarations")
}
dummyInitFn = nil
// Record user init functions.
for i := 0; i < renameinitgen; i++ {
s := lookupN("init.", i)
fn := asNode(s.Def).Name.Defn
// Skip init functions with empty bodies.
// noder.go doesn't allow external init functions, and
// order.go has already removed any OEMPTY nodes, so
// checking Len() == 0 is sufficient here.
if fn.Nbody.Len() == 0 {
if fn.Nbody.Len() == 1 && fn.Nbody.First().Op == OEMPTY {
continue
}
fns = append(fns, s.Linksym())

View file

@ -14,7 +14,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/dwarf"
"cmd/internal/goobj2"
"cmd/internal/goobj"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
@ -281,11 +281,12 @@ func Main(archInit func(*Arch)) {
flag.StringVar(&benchfile, "bench", "", "append benchmark times to `file`")
flag.BoolVar(&smallFrames, "smallframes", false, "reduce the size limit for stack allocated objects")
flag.BoolVar(&Ctxt.UseBASEntries, "dwarfbasentries", Ctxt.UseBASEntries, "use base address selection entries in DWARF")
flag.BoolVar(&Ctxt.Flag_go115newobj, "go115newobj", true, "use new object file format")
flag.StringVar(&jsonLogOpt, "json", "", "version,destination for JSON compiler/optimizer logging")
objabi.Flagparse(usage)
Ctxt.Pkgpath = myimportpath
for _, f := range strings.Split(spectre, ",") {
f = strings.TrimSpace(f)
switch f {
@ -315,7 +316,7 @@ func Main(archInit func(*Arch)) {
// Record flags that affect the build result. (And don't
// record flags that don't, since that would cause spurious
// changes in the binary.)
recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre", "go115newobj")
recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre")
if smallFrames {
maxStackVarSize = 128 * 1024
@ -616,7 +617,7 @@ func Main(archInit func(*Arch)) {
var fcount int64
for i := 0; i < len(xtop); i++ {
n := xtop[i]
if op := n.Op; op == ODCLFUNC || op == OCLOSURE {
if n.Op == ODCLFUNC {
Curfn = n
decldepth = 1
saveerrors()
@ -641,6 +642,8 @@ func Main(archInit func(*Arch)) {
errorexit()
}
fninit(xtop)
// Phase 4: Decide how to capture closed variables.
// This needs to run before escape analysis,
// because variables captured by value do not escape.
@ -750,10 +753,6 @@ func Main(archInit func(*Arch)) {
}
timings.AddEvent(fcount, "funcs")
if nsavederrors+nerrors == 0 {
fninit(xtop)
}
compileFunctions()
if nowritebarrierrecCheck != nil {
@ -790,7 +789,7 @@ func Main(archInit func(*Arch)) {
// Write object data to disk.
timings.Start("be", "dumpobj")
dumpdata()
Ctxt.NumberSyms(false)
Ctxt.NumberSyms()
dumpobj()
if asmhdr != "" {
dumpasmhdr()
@ -808,6 +807,9 @@ func Main(archInit func(*Arch)) {
}
}
if len(funcStack) != 0 {
Fatalf("funcStack is non-empty: %v", len(funcStack))
}
if len(compilequeue) != 0 {
Fatalf("%d uncompiled functions", len(compilequeue))
}
@ -1279,7 +1281,7 @@ func importfile(f *Val) *types.Pkg {
c, _ = imp.ReadByte()
}
var fingerprint goobj2.FingerprintType
var fingerprint goobj.FingerprintType
switch c {
case '\n':
yyerror("cannot import %s: old export format no longer supported (recompile library)", path_)
@ -1489,7 +1491,7 @@ func recordFlags(flags ...string) {
return
}
s := Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + myimportpath)
s.Type = objabi.SDWARFINFO
s.Type = objabi.SDWARFCUINFO
// Sometimes (for example when building tests) we can link
// together two package main archives. So allow dups.
s.Set(obj.AttrDuplicateOK, true)
@ -1501,7 +1503,7 @@ func recordFlags(flags ...string) {
// compiled, so that the linker can save it in the compile unit's DIE.
func recordPackageName() {
s := Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + myimportpath)
s.Type = objabi.SDWARFINFO
s.Type = objabi.SDWARFCUINFO
// Sometimes (for example when building tests) we can link
// together two package main archives. So allow dups.
s.Set(obj.AttrDuplicateOK, true)

View file

@ -653,7 +653,7 @@ func (p *noder) expr(expr syntax.Expr) *Node {
obj := p.expr(expr.X)
if obj.Op == OPACK {
obj.Name.SetUsed(true)
return oldname(restrictlookup(expr.Sel.Value, obj.Name.Pkg))
return importName(obj.Name.Pkg.Lookup(expr.Sel.Value))
}
n := nodSym(OXDOT, obj, p.name(expr.Sel))
n.Pos = p.pos(expr) // lineno may have been changed by p.expr(expr.X)
@ -857,7 +857,7 @@ func (p *noder) interfaceType(expr *syntax.InterfaceType) *Node {
p.setlineno(method)
var n *Node
if method.Name == nil {
n = p.nodSym(method, ODCLFIELD, oldname(p.packname(method.Type)), nil)
n = p.nodSym(method, ODCLFIELD, importName(p.packname(method.Type)), nil)
} else {
mname := p.name(method.Name)
sig := p.typeExpr(method.Type)
@ -896,7 +896,7 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym {
def.Name.SetUsed(true)
pkg = def.Name.Pkg
}
return restrictlookup(expr.Sel.Value, pkg)
return pkg.Lookup(expr.Sel.Value)
}
panic(fmt.Sprintf("unexpected packname: %#v", expr))
}
@ -911,7 +911,7 @@ func (p *noder) embedded(typ syntax.Expr) *Node {
}
sym := p.packname(typ)
n := p.nodSym(typ, ODCLFIELD, oldname(sym), lookup(sym.Name))
n := p.nodSym(typ, ODCLFIELD, importName(sym), lookup(sym.Name))
n.SetEmbedded(true)
if isStar {
@ -1641,10 +1641,3 @@ func mkname(sym *types.Sym) *Node {
}
return n
}
func unparen(x *Node) *Node {
for x.Op == OPAREN {
x = x.Left
}
return x
}

View file

@ -113,12 +113,16 @@ func dumpCompilerObj(bout *bio.Writer) {
func dumpdata() {
externs := len(externdcl)
xtops := len(xtop)
dumpglobls()
addptabs()
exportlistLen := len(exportlist)
addsignats(externdcl)
dumpsignats()
dumptabs()
ptabsLen := len(ptabs)
itabsLen := len(itabs)
dumpimportstrings()
dumpbasictypes()
@ -129,9 +133,19 @@ func dumpdata() {
// number of types in a finite amount of code.
// In the typical case, we loop 0 or 1 times.
// It was not until issue 24761 that we found any code that required a loop at all.
for len(compilequeue) > 0 {
for {
for i := xtops; i < len(xtop); i++ {
n := xtop[i]
if n.Op == ODCLFUNC {
funccompile(n)
}
}
xtops = len(xtop)
compileFunctions()
dumpsignats()
if xtops == len(xtop) {
break
}
}
// Dump extra globals.
@ -149,6 +163,16 @@ func dumpdata() {
}
addGCLocals()
if exportlistLen != len(exportlist) {
Fatalf("exportlist changed after compile functions loop")
}
if ptabsLen != len(ptabs) {
Fatalf("ptabs changed after compile functions loop")
}
if itabsLen != len(itabs) {
Fatalf("itabs changed after compile functions loop")
}
}
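The loop above runs to a fixed point because compiling a function may append new functions (e.g. wrappers) to xtop. A minimal sketch of that worklist shape, with illustrative names:

package main

import "fmt"

func main() {
	work := []string{"f", "g"}
	done := 0
	for done < len(work) {
		n := len(work)
		for i := done; i < n; i++ {
			fmt.Println("compile", work[i])
			if work[i] == "g" { // compiling may enqueue more work
				work = append(work, "g-wrapper")
			}
		}
		done = n
	}
}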
func dumpLinkerObj(bout *bio.Writer) {
@ -166,7 +190,7 @@ func dumpLinkerObj(bout *bio.Writer) {
fmt.Fprintf(bout, "\n!\n")
obj.WriteObjFile(Ctxt, bout, myimportpath)
obj.WriteObjFile(Ctxt, bout)
}
func addptabs() {
@ -291,10 +315,8 @@ func addGCLocals() {
}
if x := s.Func.StackObjects; x != nil {
attr := int16(obj.RODATA)
if s.DuplicateOK() {
attr |= obj.DUPOK
}
ggloblsym(x, int32(len(x.P)), attr)
x.Set(obj.AttrStatic, true)
}
if x := s.Func.OpenCodedDeferInfo; x != nil {
ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
@ -354,10 +376,11 @@ func stringsym(pos src.XPos, s string) (data *obj.LSym) {
symdata := Ctxt.Lookup(symdataname)
if !symdata.SeenGlobl() {
if !symdata.OnList() {
// string data
off := dsname(symdata, 0, s, pos, "string")
ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
symdata.Set(obj.AttrContentAddressable, true)
}
return symdata

View file

@ -206,8 +206,7 @@ func (o *Order) addrTemp(n *Node) *Node {
// TODO: expand this to all static composite literal nodes?
n = defaultlit(n, nil)
dowidth(n.Type)
vstat := staticname(n.Type)
vstat.MarkReadonly()
vstat := readonlystaticname(n.Type)
var s InitSchedule
s.staticassign(vstat, n)
if s.out != nil {
@ -289,20 +288,13 @@ func (o *Order) popTemp(mark ordermarker) {
o.temp = o.temp[:mark]
}
// cleanTempNoPop emits VARKILL and if needed VARLIVE instructions
// to *out for each temporary above the mark on the temporary stack.
// cleanTempNoPop emits VARKILL instructions to *out
// for each temporary above the mark on the temporary stack.
// It does not pop the temporaries from the stack.
func (o *Order) cleanTempNoPop(mark ordermarker) []*Node {
var out []*Node
for i := len(o.temp) - 1; i >= int(mark); i-- {
n := o.temp[i]
if n.Name.Keepalive() {
n.Name.SetKeepalive(false)
n.Name.SetAddrtaken(true) // ensure SSA keeps the n variable
live := nod(OVARLIVE, n, nil)
live = typecheck(live, ctxStmt)
out = append(out, live)
}
kill := nod(OVARKILL, n, nil)
kill = typecheck(kill, ctxStmt)
out = append(out, kill)
@ -501,8 +493,9 @@ func (o *Order) call(n *Node) {
// still alive when we pop the temp stack.
if arg.Op == OCONVNOP && arg.Left.Type.IsUnsafePtr() {
x := o.copyExpr(arg.Left, arg.Left.Type, false)
x.Name.SetKeepalive(true)
arg.Left = x
x.Name.SetAddrtaken(true) // ensure SSA keeps the x variable
n.Nbody.Append(typecheck(nod(OVARLIVE, x, nil), ctxStmt))
}
}
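The rewritten call logic keeps the temporary behind a uintptr(unsafe.Pointer(...)) argument alive with an explicit OVARLIVE. At the source level, runtime.KeepAlive expresses the same intent; a sketch where syscallish is a made-up stand-in for a uintptr-taking call:

package main

import (
	"fmt"
	"runtime"
	"unsafe"
)

//go:noinline
func syscallish(p uintptr) { fmt.Printf("%#x\n", p) }

func main() {
	buf := make([]byte, 16)
	syscallish(uintptr(unsafe.Pointer(&buf[0])))
	runtime.KeepAlive(buf) // what the inserted OVARLIVE arranges for the hidden temp
}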
@ -928,7 +921,7 @@ func (o *Order) stmt(n *Node) {
n2.Ninit.Append(tmp2)
}
r.Left = o.newTemp(r.Right.Left.Type.Elem(), types.Haspointers(r.Right.Left.Type.Elem()))
r.Left = o.newTemp(r.Right.Left.Type.Elem(), r.Right.Left.Type.Elem().HasPointers())
tmp2 := nod(OAS, tmp1, r.Left)
tmp2 = typecheck(tmp2, ctxStmt)
n2.Ninit.Append(tmp2)
@ -1407,7 +1400,7 @@ func (o *Order) as2(n *Node) {
left := []*Node{}
for ni, l := range n.List.Slice() {
if !l.isBlank() {
tmp := o.newTemp(l.Type, types.Haspointers(l.Type))
tmp := o.newTemp(l.Type, l.Type.HasPointers())
n.List.SetIndex(ni, tmp)
tmplist = append(tmplist, tmp)
left = append(left, l)
@ -1429,7 +1422,7 @@ func (o *Order) okAs2(n *Node) {
var tmp1, tmp2 *Node
if !n.List.First().isBlank() {
typ := n.Right.Type
tmp1 = o.newTemp(typ, types.Haspointers(typ))
tmp1 = o.newTemp(typ, typ.HasPointers())
}
if !n.List.Second().isBlank() {

View file

@ -80,8 +80,8 @@ func cmpstackvarlt(a, b *Node) bool {
return a.Name.Used()
}
ap := types.Haspointers(a.Type)
bp := types.Haspointers(b.Type)
ap := a.Type.HasPointers()
bp := b.Type.HasPointers()
if ap != bp {
return ap
}
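cmpstackvarlt orders pointer-holding variables ahead of pointer-free ones so the scanned prefix of the frame (stkptrsize) stays small. A sketch of that ordering with sort.SliceStable, simplified: the real comparison also considers class, use, and zeroing:

package main

import (
	"fmt"
	"sort"
)

type stackVar struct {
	name        string
	hasPointers bool
	size        int64
}

func main() {
	vars := []stackVar{
		{"a", false, 8}, {"b", true, 16}, {"c", false, 4}, {"d", true, 8},
	}
	sort.SliceStable(vars, func(i, j int) bool {
		vi, vj := vars[i], vars[j]
		if vi.hasPointers != vj.hasPointers {
			return vi.hasPointers // pointer-holding vars first
		}
		return vi.size > vj.size // then larger vars first
	})
	fmt.Println(vars) // [{b true 16} {d true 8} {a false 8} {c false 4}]
}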
@ -176,7 +176,7 @@ func (s *ssafn) AllocFrame(f *ssa.Func) {
}
s.stksize += w
s.stksize = Rnd(s.stksize, int64(n.Type.Align))
if types.Haspointers(n.Type) {
if n.Type.HasPointers() {
s.stkptrsize = s.stksize
lastHasPtr = true
} else {
@ -428,9 +428,10 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
decls, dwarfVars := createDwarfVars(fnsym, fn.Func, apdecls)
// For each type referenced by the functions auto vars, attach a
// dummy relocation to the function symbol to insure that the type
// included in DWARF processing during linking.
// For each type referenced by the function's auto vars but not
// already referenced by a dwarf var, attach a dummy relocation to
// the function symbol to ensure that the type is included in DWARF
// processing during linking.
typesyms := []*obj.LSym{}
for t, _ := range fnsym.Func.Autot {
typesyms = append(typesyms, t)
@ -480,7 +481,7 @@ func declPos(decl *Node) src.XPos {
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVars(apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
func createSimpleVars(fnsym *obj.LSym, apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
var vars []*dwarf.Var
var decls []*Node
selected := make(map[*Node]bool)
@ -490,13 +491,13 @@ func createSimpleVars(apDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
}
decls = append(decls, n)
vars = append(vars, createSimpleVar(n))
vars = append(vars, createSimpleVar(fnsym, n))
selected[n] = true
}
return decls, vars, selected
}
func createSimpleVar(n *Node) *dwarf.Var {
func createSimpleVar(fnsym *obj.LSym, n *Node) *dwarf.Var {
var abbrev int
offs := n.Xoffset
@ -506,7 +507,7 @@ func createSimpleVar(n *Node) *dwarf.Var {
if Ctxt.FixedFrameSize() == 0 {
offs -= int64(Widthptr)
}
if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) || objabi.GOARCH == "arm64" {
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled
offs -= int64(Widthptr)
}
@ -519,6 +520,7 @@ func createSimpleVar(n *Node) *dwarf.Var {
}
typename := dwarf.InfoPrefix + typesymname(n.Type)
delete(fnsym.Func.Autot, ngotype(n).Linksym())
inlIndex := 0
if genDwarfInline > 1 {
if n.Name.InlFormal() || n.Name.InlLocal() {
@ -546,7 +548,7 @@ func createSimpleVar(n *Node) *dwarf.Var {
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
func createComplexVars(fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
func createComplexVars(fnsym *obj.LSym, fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
debugInfo := fn.DebugInfo
// Produce a DWARF variable entry for each user variable.
@ -561,7 +563,7 @@ func createComplexVars(fn *Func) ([]*Node, []*dwarf.Var, map[*Node]bool) {
ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
}
if dvar := createComplexVar(fn, ssa.VarID(varID)); dvar != nil {
if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
decls = append(decls, n)
vars = append(vars, dvar)
}
@ -578,9 +580,9 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
var decls []*Node
var selected map[*Node]bool
if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
decls, vars, selected = createComplexVars(fn)
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
decls, vars, selected = createSimpleVars(apDecls)
decls, vars, selected = createSimpleVars(fnsym, apDecls)
}
dcl := apDecls
@ -616,7 +618,7 @@ func createDwarfVars(fnsym *obj.LSym, fn *Func, apDecls []*Node) ([]*Node, []*dw
// Args not of SSA-able type are treated here; they
// are homed on the stack in a single place for the
// entire call.
vars = append(vars, createSimpleVar(n))
vars = append(vars, createSimpleVar(fnsym, n))
decls = append(decls, n)
continue
}
@ -701,7 +703,7 @@ func stackOffset(slot ssa.LocalSlot) int32 {
if Ctxt.FixedFrameSize() == 0 {
base -= int64(Widthptr)
}
if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) || objabi.GOARCH == "arm64" {
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled
base -= int64(Widthptr)
}
@ -712,7 +714,7 @@ func stackOffset(slot ssa.LocalSlot) int32 {
}
// createComplexVar builds a single DWARF variable entry and location list.
func createComplexVar(fn *Func, varID ssa.VarID) *dwarf.Var {
func createComplexVar(fnsym *obj.LSym, fn *Func, varID ssa.VarID) *dwarf.Var {
debug := fn.DebugInfo
n := debug.Vars[varID].(*Node)
@ -727,6 +729,7 @@ func createComplexVar(fn *Func, varID ssa.VarID) *dwarf.Var {
}
gotype := ngotype(n).Linksym()
delete(fnsym.Func.Autot, gotype)
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
if genDwarfInline > 1 {

View file

@ -20,7 +20,7 @@ func typeWithoutPointers() *types.Type {
func typeWithPointers() *types.Type {
t := types.New(TSTRUCT)
f := &types.Field{Type: types.New(TPTR)}
f := &types.Field{Type: types.NewPtr(types.New(TINT))}
t.SetFields([]*types.Field{f})
return t
}
@ -181,14 +181,6 @@ func TestStackvarSort(t *testing.T) {
nodeWithClass(Node{Type: &types.Type{}, Sym: &types.Sym{Name: "xyz"}}, PAUTO),
nodeWithClass(Node{Type: typeWithoutPointers(), Sym: &types.Sym{}}, PAUTO),
}
// haspointers updates Type.Haspointers as a side effect, so
// exercise this function on all inputs so that reflect.DeepEqual
// doesn't produce false positives.
for i := range want {
types.Haspointers(want[i].Type)
types.Haspointers(inp[i].Type)
}
sort.Sort(byStackVar(inp))
if !reflect.DeepEqual(want, inp) {
t.Error("sort failed")

View file

@ -140,24 +140,14 @@ type Liveness struct {
regMaps []liveRegMask
cache progeffectscache
// These are only populated if open-coded defers are being used.
// List of vars/stack slots storing defer args
openDeferVars []openDeferVarInfo
// Map from defer arg OpVarDef to the block where the OpVarDef occurs.
openDeferVardefToBlockMap map[*Node]*ssa.Block
// Map of blocks that cannot reach a return or exit (panic)
nonReturnBlocks map[*ssa.Block]bool
}
type openDeferVarInfo struct {
n *Node // Var/stack slot storing a defer arg
varsIndex int // Index of variable in lv.vars
}
// LivenessMap maps from *ssa.Value to LivenessIndex.
type LivenessMap struct {
vals map[ssa.ID]LivenessIndex
// The set of live, pointer-containing variables at the deferreturn
// call (only set when open-coded defers are used).
deferreturn LivenessIndex
}
func (m *LivenessMap) reset() {
@ -168,6 +158,7 @@ func (m *LivenessMap) reset() {
delete(m.vals, k)
}
}
m.deferreturn = LivenessInvalid
}
func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
@ -268,7 +259,7 @@ func (v *varRegVec) AndNot(v1, v2 varRegVec) {
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
func livenessShouldTrack(n *Node) bool {
return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && types.Haspointers(n.Type)
return n.Op == ONAME && (n.Class() == PAUTO || n.Class() == PPARAM || n.Class() == PPARAMOUT) && n.Type.HasPointers()
}
// getvariables returns the list of on-stack variables that we need to track
@ -445,7 +436,7 @@ func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) {
case ssa.LocalSlot:
return mask
case *ssa.Register:
if ptrOnly && !v.Type.HasHeapPointer() {
if ptrOnly && !v.Type.HasPointers() {
return mask
}
regs[0] = loc
@ -460,7 +451,7 @@ func (lv *Liveness) regEffects(v *ssa.Value) (uevar, kill liveRegMask) {
if loc1 == nil {
continue
}
if ptrOnly && !v.Type.FieldType(i).HasHeapPointer() {
if ptrOnly && !v.Type.FieldType(i).HasPointers() {
continue
}
regs[nreg] = loc1.(*ssa.Register)
@ -542,7 +533,7 @@ func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkpt
if cap(lc.be) >= f.NumBlocks() {
lv.be = lc.be[:f.NumBlocks()]
}
lv.livenessMap = LivenessMap{lc.livenessMap.vals}
lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessInvalid}
lc.livenessMap.vals = nil
}
if lv.be == nil {
@ -577,13 +568,13 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 {
Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
}
if !t.HasPointers() {
// Note: this case ensures that pointers to go:notinheap types
// are not considered pointers by garbage collection and stack copying.
return
}
switch t.Etype {
case TINT8, TUINT8, TINT16, TUINT16,
TINT32, TUINT32, TINT64, TUINT64,
TINT, TUINT, TUINTPTR, TBOOL,
TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128:
case TPTR, TUNSAFEPTR, TFUNC, TCHAN, TMAP:
if off&int64(Widthptr-1) != 0 {
Fatalf("onebitwalktype1: invalid alignment, %v", t)
@ -870,7 +861,7 @@ func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
// typedmemclr and typedmemmove are write barriers and
// deeply non-preemptible. They are unsafe points and
// hence should not have liveness maps.
if sym, _ := v.Aux.(*obj.LSym); sym == typedmemclr || sym == typedmemmove {
if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == typedmemclr || sym.Fn == typedmemmove) {
return false
}
return true
@ -893,58 +884,12 @@ func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
func (lv *Liveness) prologue() {
lv.initcache()
if lv.fn.Func.HasDefer() && !lv.fn.Func.OpenCodedDeferDisallowed() {
lv.openDeferVardefToBlockMap = make(map[*Node]*ssa.Block)
for i, n := range lv.vars {
if n.Name.OpenDeferSlot() {
lv.openDeferVars = append(lv.openDeferVars, openDeferVarInfo{n: n, varsIndex: i})
}
}
// Find any blocks that cannot reach a return or a BlockExit
// (panic) -- these must be because of an infinite loop.
reachesRet := make(map[ssa.ID]bool)
blockList := make([]*ssa.Block, 0, 256)
for _, b := range lv.f.Blocks {
if b.Kind == ssa.BlockRet || b.Kind == ssa.BlockRetJmp || b.Kind == ssa.BlockExit {
blockList = append(blockList, b)
}
}
for len(blockList) > 0 {
b := blockList[0]
blockList = blockList[1:]
if reachesRet[b.ID] {
continue
}
reachesRet[b.ID] = true
for _, e := range b.Preds {
blockList = append(blockList, e.Block())
}
}
lv.nonReturnBlocks = make(map[*ssa.Block]bool)
for _, b := range lv.f.Blocks {
if !reachesRet[b.ID] {
lv.nonReturnBlocks[b] = true
//fmt.Println("No reach ret", lv.f.Name, b.ID, b.Kind)
}
}
}
for _, b := range lv.f.Blocks {
be := lv.blockEffects(b)
// Walk the block instructions backward and update the block
// effects with the each prog effects.
for j := len(b.Values) - 1; j >= 0; j-- {
if b.Values[j].Op == ssa.OpVarDef {
n := b.Values[j].Aux.(*Node)
if n.Name.OpenDeferSlot() {
lv.openDeferVardefToBlockMap[n] = b
}
}
pos, e := lv.valueEffects(b.Values[j])
regUevar, regKill := lv.regEffects(b.Values[j])
if e&varkill != 0 {
@ -961,20 +906,6 @@ func (lv *Liveness) prologue() {
}
}
// markDeferVarsLive marks each variable storing an open-coded defer arg as
// specially live in block b if the variable definition dominates block b.
func (lv *Liveness) markDeferVarsLive(b *ssa.Block, newliveout *varRegVec) {
// Only force computation of dominators if we have a block where we need
// to specially mark defer args live.
sdom := lv.f.Sdom()
for _, info := range lv.openDeferVars {
defB := lv.openDeferVardefToBlockMap[info.n]
if sdom.IsAncestorEq(defB, b) {
newliveout.vars.Set(int32(info.varsIndex))
}
}
}
// Solve the liveness dataflow equations.
func (lv *Liveness) solve() {
// These temporary bitvectors exist to avoid successive allocations and
@ -1018,23 +949,6 @@ func (lv *Liveness) solve() {
}
}
if lv.fn.Func.HasDefer() && !lv.fn.Func.OpenCodedDeferDisallowed() &&
(b.Kind == ssa.BlockExit || lv.nonReturnBlocks[b]) {
// Open-coded defer args slots must be live
// everywhere in a function, since a panic can
// occur (almost) anywhere. Force all appropriate
// defer arg slots to be live in BlockExit (panic)
// blocks and in blocks that do not reach a return
// (because of infinite loop).
//
// We are assuming that the defer exit code at
// BlockReturn/BlockReturnJmp accesses all of the
// defer args (with pointers), and so keeps them
// live. This analysis may have to be adjusted if
// that changes (because of optimizations).
lv.markDeferVarsLive(b, &newliveout)
}
if !be.liveout.Eq(newliveout) {
change = true
be.liveout.Copy(newliveout)
@ -1087,6 +1001,17 @@ func (lv *Liveness) epilogue() {
n.Name.SetNeedzero(true)
livedefer.Set(int32(i))
}
if n.Name.OpenDeferSlot() {
// Open-coded defer arg slots must be live
// everywhere in a function, since a panic can
// occur (almost) anywhere. Because they are live
// everywhere, they must be zeroed on entry.
livedefer.Set(int32(i))
// It was already marked as Needzero when created.
if !n.Name.Needzero() {
Fatalf("all pointer-containing defer arg slots should have Needzero set")
}
}
}
}
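The liveness rule above is visible at the language level: a defer's argument is evaluated and stored when the defer statement executes, and that stored value must survive until a panic unwinds. A small demonstration:

package main

import "fmt"

func main() {
	defer func() { recover() }()
	x := 1
	defer fmt.Println("deferred sees", x) // the arg slot is written here
	x = 2
	panic("boom") // the slot must still be live (and intact) here: prints 1
}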
@ -1188,6 +1113,17 @@ func (lv *Liveness) epilogue() {
lv.compact(b)
}
// If we have an open-coded deferreturn call, make a liveness map for it.
if lv.fn.Func.OpenCodedDeferDisallowed() {
lv.livenessMap.deferreturn = LivenessInvalid
} else {
lv.livenessMap.deferreturn = LivenessIndex{
stackMapIndex: lv.stackMapSet.add(livedefer),
regMapIndex: 0, // entry regMap, containing no live registers
isUnsafePoint: false,
}
}
// Done compacting. Throw out the stack map set.
lv.stackMaps = lv.stackMapSet.extractUniqe()
lv.stackMapSet = bvecSet{}
@ -1295,8 +1231,8 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
s := "live at "
if v == nil {
s += fmt.Sprintf("entry to %s:", lv.fn.funcname())
} else if sym, ok := v.Aux.(*obj.LSym); ok {
fn := sym.Name
} else if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
fn := sym.Fn.Name
if pos := strings.Index(fn, "."); pos >= 0 {
fn = fn[pos+1:]
}
@ -1563,6 +1499,7 @@ func (lv *Liveness) emit() (argsSym, liveSym, regsSym *obj.LSym) {
makeSym := func(tmpSym *obj.LSym) *obj.LSym {
return Ctxt.LookupInit(fmt.Sprintf("gclocals·%x", md5.Sum(tmpSym.P)), func(lsym *obj.LSym) {
lsym.P = tmpSym.P
lsym.Set(obj.AttrContentAddressable, true)
})
}
if !go115ReduceLiveness {

View file

@ -42,7 +42,7 @@ var omit_pkgs = []string{
"internal/cpu",
}
// Only insert racefuncenterfp/racefuncexit into the following packages.
// Don't insert racefuncenterfp/racefuncexit into the following packages.
// Memory accesses in the packages are either uninteresting or will cause false positives.
var norace_inst_pkgs = []string{"sync", "sync/atomic"}

View file

@ -334,7 +334,7 @@ func walkrange(n *Node) *Node {
hv1 := temp(t.Elem())
hv1.SetTypecheck(1)
if types.Haspointers(t.Elem()) {
if t.Elem().HasPointers() {
init = append(init, nod(OAS, hv1, nil))
}
hb := temp(types.Types[TBOOL])
@ -586,7 +586,7 @@ func arrayClear(n, v1, v2, a *Node) bool {
n.Nbody.Append(nod(OAS, hn, tmp))
var fn *Node
if a.Type.Elem().HasHeapPointer() {
if a.Type.Elem().HasPointers() {
// memclrHasPointers(hp, hn)
Curfn.Func.setWBPos(stmt.Pos)
fn = mkcall("memclrHasPointers", nil, nil, hp, hn)

View file

@ -119,7 +119,7 @@ func bmap(t *types.Type) *types.Type {
// the type of the overflow field to uintptr in this case.
// See comment on hmap.overflow in runtime/map.go.
otyp := types.NewPtr(bucket)
if !types.Haspointers(elemtype) && !types.Haspointers(keytype) {
if !elemtype.HasPointers() && !keytype.HasPointers() {
otyp = types.Types[TUINTPTR]
}
overflow := makefield("overflow", otyp)
@ -754,7 +754,7 @@ var kinds = []int{
// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *types.Type) int64 {
if !types.Haspointers(t) {
if !t.HasPointers() {
return 0
}
@ -788,7 +788,7 @@ func typeptrdata(t *types.Type) int64 {
// Find the last field that has pointers.
var lastPtrField *types.Field
for _, t1 := range t.Fields().Slice() {
if types.Haspointers(t1.Type) {
if t1.Type.HasPointers() {
lastPtrField = t1
}
}
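For a flat struct whose pointer fields are plain pointers, the "last field that has pointers" scan reduces to finding where the final pointer field ends. A reflect-based sketch of that special case (illustrative; the real typeptrdata recurses into field types):

package main

import (
	"fmt"
	"reflect"
)

func ptrPrefix(t reflect.Type) uintptr {
	var end uintptr
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.Type.Kind() == reflect.Ptr {
			end = f.Offset + f.Type.Size()
		}
	}
	return end
}

func main() {
	type T struct {
		A *int
		B int64
		C *int
		D [4]int64
	}
	fmt.Println(ptrPrefix(reflect.TypeOf(T{}))) // 24 on 64-bit: prefix ends after C
}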
@ -1168,6 +1168,15 @@ func dtypesym(t *types.Type) *obj.LSym {
if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
// named types from other files are defined only by those files
if tbase.Sym != nil && tbase.Sym.Pkg != localpkg {
if i, ok := typeSymIdx[tbase]; ok {
lsym.Pkg = tbase.Sym.Pkg.Prefix
if t != tbase {
lsym.SymIdx = int32(i[1])
} else {
lsym.SymIdx = int32(i[0])
}
lsym.Set(obj.AttrIndexed, true)
}
return lsym
}
// TODO(mdempsky): Investigate whether this can happen.
@ -1577,9 +1586,7 @@ func dumptabs() {
}
// Nothing writes static itabs, so they are read only.
ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym()
dsymptr(ilink, 0, i.lsym, 0)
ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
i.lsym.Set(obj.AttrContentAddressable, true)
}
// process ptabs
@ -1742,6 +1749,7 @@ func dgcptrmask(t *types.Type) *obj.LSym {
duint8(lsym, i, x)
}
ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
lsym.Set(obj.AttrContentAddressable, true)
}
return lsym
}
@ -1753,7 +1761,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) {
for i := range ptrmask {
ptrmask[i] = 0
}
if !types.Haspointers(t) {
if !t.HasPointers() {
return
}
@ -1822,7 +1830,7 @@ func (p *GCProg) end() {
func (p *GCProg) emit(t *types.Type, offset int64) {
dowidth(t)
if !types.Haspointers(t) {
if !t.HasPointers() {
return
}
if t.Width == int64(Widthptr) {

View file

@ -106,18 +106,16 @@ func walkselect(sel *Node) {
}
func walkselectcases(cases *Nodes) []*Node {
n := cases.Len()
ncas := cases.Len()
sellineno := lineno
// optimization: zero-case select
if n == 0 {
if ncas == 0 {
return []*Node{mkcall("block", nil, nil)}
}
// optimization: one-case select: single op.
// TODO(rsc): Reenable optimization once order.go can handle it.
// golang.org/issue/7672.
if n == 1 {
if ncas == 1 {
cas := cases.First()
setlineno(cas)
l := cas.Ninit.Slice()
@ -125,17 +123,14 @@ func walkselectcases(cases *Nodes) []*Node {
n := cas.Left
l = append(l, n.Ninit.Slice()...)
n.Ninit.Set(nil)
var ch *Node
switch n.Op {
default:
Fatalf("select %v", n.Op)
// ok already
case OSEND:
ch = n.Left
// already ok
case OSELRECV, OSELRECV2:
ch = n.Right.Left
if n.Op == OSELRECV || n.List.Len() == 0 {
if n.Left == nil {
n = n.Right
@ -159,16 +154,7 @@ func walkselectcases(cases *Nodes) []*Node {
n = typecheck(n, ctxStmt)
}
// if ch == nil { block() }; n;
a := nod(OIF, nil, nil)
a.Left = nod(OEQ, ch, nodnil())
var ln Nodes
ln.Set(l)
a.Nbody.Set1(mkcall("block", nil, &ln))
l = ln.Slice()
a = typecheck(a, ctxStmt)
l = append(l, a, n)
l = append(l, n)
}
l = append(l, cas.Nbody.Slice()...)
@ -178,10 +164,12 @@ func walkselectcases(cases *Nodes) []*Node {
// convert case value arguments to addresses.
// this rewrite is used by both the general code and the next optimization.
var dflt *Node
for _, cas := range cases.Slice() {
setlineno(cas)
n := cas.Left
if n == nil {
dflt = cas
continue
}
switch n.Op {
@ -202,15 +190,10 @@ func walkselectcases(cases *Nodes) []*Node {
}
// optimization: two-case select but one is default: single non-blocking op.
if n == 2 && (cases.First().Left == nil || cases.Second().Left == nil) {
var cas *Node
var dflt *Node
if cases.First().Left == nil {
if ncas == 2 && dflt != nil {
cas := cases.First()
if cas == dflt {
cas = cases.Second()
dflt = cases.First()
} else {
dflt = cases.Second()
cas = cases.First()
}
n := cas.Left
@ -228,8 +211,6 @@ func walkselectcases(cases *Nodes) []*Node {
case OSELRECV:
// if selectnbrecv(&v, c) { body } else { default body }
r = nod(OIF, nil, nil)
r.Ninit.Set(cas.Ninit.Slice())
ch := n.Right.Left
elem := n.Left
if elem == nil {
@ -239,8 +220,6 @@ func walkselectcases(cases *Nodes) []*Node {
case OSELRECV2:
// if selectnbrecv2(&v, &received, c) { body } else { default body }
r = nod(OIF, nil, nil)
r.Ninit.Set(cas.Ninit.Slice())
ch := n.Right.Left
elem := n.Left
if elem == nil {
@ -257,54 +236,62 @@ func walkselectcases(cases *Nodes) []*Node {
return []*Node{r, nod(OBREAK, nil, nil)}
}
if dflt != nil {
ncas--
}
casorder := make([]*Node, ncas)
nsends, nrecvs := 0, 0
var init []*Node
// generate sel-struct
lineno = sellineno
selv := temp(types.NewArray(scasetype(), int64(n)))
selv := temp(types.NewArray(scasetype(), int64(ncas)))
r := nod(OAS, selv, nil)
r = typecheck(r, ctxStmt)
init = append(init, r)
order := temp(types.NewArray(types.Types[TUINT16], 2*int64(n)))
r = nod(OAS, order, nil)
r = typecheck(r, ctxStmt)
init = append(init, r)
// No initialization for order; runtime.selectgo is responsible for that.
order := temp(types.NewArray(types.Types[TUINT16], 2*int64(ncas)))
var pc0, pcs *Node
if flag_race {
pcs = temp(types.NewArray(types.Types[TUINTPTR], int64(ncas)))
pc0 = typecheck(nod(OADDR, nod(OINDEX, pcs, nodintconst(0)), nil), ctxExpr)
} else {
pc0 = nodnil()
}
// register cases
for i, cas := range cases.Slice() {
for _, cas := range cases.Slice() {
setlineno(cas)
init = append(init, cas.Ninit.Slice()...)
cas.Ninit.Set(nil)
// Keep in sync with runtime/select.go.
const (
caseNil = iota
caseRecv
caseSend
caseDefault
)
n := cas.Left
if n == nil { // default:
continue
}
var i int
var c, elem *Node
var kind int64 = caseDefault
if n := cas.Left; n != nil {
init = append(init, n.Ninit.Slice()...)
switch n.Op {
default:
Fatalf("select %v", n.Op)
case OSEND:
kind = caseSend
i = nsends
nsends++
c = n.Left
elem = n.Right
case OSELRECV, OSELRECV2:
kind = caseRecv
nrecvs++
i = ncas - nrecvs
c = n.Right.Left
elem = n.Left
}
}
casorder[i] = cas
setField := func(f string, val *Node) {
r := nod(OAS, nodSym(ODOT, nod(OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
@ -312,11 +299,8 @@ func walkselectcases(cases *Nodes) []*Node {
init = append(init, r)
}
setField("kind", nodintconst(kind))
if c != nil {
c = convnop(c, types.Types[TUNSAFEPTR])
setField("c", c)
}
if elem != nil {
elem = convnop(elem, types.Types[TUNSAFEPTR])
setField("elem", elem)
@ -324,11 +308,14 @@ func walkselectcases(cases *Nodes) []*Node {
// TODO(mdempsky): There should be a cleaner way to
// handle this.
if instrumenting {
r = mkcall("selectsetpc", nil, nil, bytePtrToIndex(selv, int64(i)))
if flag_race {
r = mkcall("selectsetpc", nil, nil, nod(OADDR, nod(OINDEX, pcs, nodintconst(int64(i))), nil))
init = append(init, r)
}
}
if nsends+nrecvs != ncas {
Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
}
// run the select
lineno = sellineno
@ -337,23 +324,23 @@ func walkselectcases(cases *Nodes) []*Node {
r = nod(OAS2, nil, nil)
r.List.Set2(chosen, recvOK)
fn := syslook("selectgo")
r.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), nodintconst(int64(n))))
r.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
r = typecheck(r, ctxStmt)
init = append(init, r)
// selv and order are no longer alive after selectgo.
init = append(init, nod(OVARKILL, selv, nil))
init = append(init, nod(OVARKILL, order, nil))
if flag_race {
init = append(init, nod(OVARKILL, pcs, nil))
}
// dispatch cases
for i, cas := range cases.Slice() {
setlineno(cas)
cond := nod(OEQ, chosen, nodintconst(int64(i)))
dispatch := func(cond, cas *Node) {
cond = typecheck(cond, ctxExpr)
cond = defaultlit(cond, nil)
r = nod(OIF, cond, nil)
r := nod(OIF, cond, nil)
if n := cas.Left; n != nil && n.Op == OSELRECV2 {
x := nod(OAS, n.List.First(), recvOK)
@ -366,6 +353,15 @@ func walkselectcases(cases *Nodes) []*Node {
init = append(init, r)
}
if dflt != nil {
setlineno(dflt)
dispatch(nod(OLT, chosen, nodintconst(0)), dflt)
}
for i, cas := range casorder {
setlineno(cas)
dispatch(nod(OEQ, chosen, nodintconst(int64(i))), cas)
}
return init
}
@ -384,9 +380,6 @@ func scasetype() *types.Type {
scase = tostruct([]*Node{
namedfield("c", types.Types[TUNSAFEPTR]),
namedfield("elem", types.Types[TUNSAFEPTR]),
namedfield("kind", types.Types[TUINT16]),
namedfield("pc", types.Types[TUINTPTR]),
namedfield("releasetime", types.Types[TINT64]),
})
scase.SetNoalg(true)
}
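The one-case-plus-default optimization above means neither of the selects below allocates a scase array or calls selectgo; each lowers to a single non-blocking channel operation:

package main

import "fmt"

func main() {
	ch := make(chan int, 1)
	select { // send case + default: lowers to a non-blocking send
	case ch <- 42:
		fmt.Println("sent")
	default:
		fmt.Println("would block")
	}
	select { // receive case + default: lowers to a non-blocking receive
	case v := <-ch:
		fmt.Println("received", v)
	default:
		fmt.Println("empty")
	}
}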

View file

@ -6,6 +6,7 @@ package gc
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"fmt"
)
@ -277,6 +278,8 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool {
return Isconst(val, CTNIL)
}
markTypeUsedInInterface(val.Type)
var itab *Node
if l.Type.IsEmptyInterface() {
itab = typename(val.Type)
@ -353,14 +356,22 @@ func (c initContext) String() string {
var statuniqgen int // name generator for static temps
// staticname returns a name backed by a static data symbol.
// Callers should call n.MarkReadonly on the
// returned node for readonly nodes.
// staticname returns a name backed by a (writable) static data symbol.
// Use readonlystaticname for read-only nodes.
func staticname(t *types.Type) *Node {
// Don't use lookupN; it interns the resulting string, but these are all unique.
n := newname(lookup(fmt.Sprintf(".stmp_%d", statuniqgen)))
n := newname(lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
statuniqgen++
addvar(n, t, PEXTERN)
n.Sym.Linksym().Set(obj.AttrLocal, true)
return n
}
// readonlystaticname returns a name backed by a read-only static data symbol.
func readonlystaticname(t *types.Type) *Node {
n := staticname(t)
n.MarkReadonly()
n.Sym.Linksym().Set(obj.AttrContentAddressable, true)
return n
}
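The split gives one constructor for writable statics and a wrapper that additionally marks the symbol read-only and content-addressable, so identical blobs can be deduplicated. A toy sketch of the shape (illustrative types, not the compiler's):

package main

import "fmt"

type symbol struct {
	name               string
	readonly           bool
	contentAddressable bool
}

var statuniq int

func staticName() *symbol {
	s := &symbol{name: fmt.Sprintf(".stmp_%d", statuniq)} // writable by default
	statuniq++
	return s
}

func readonlyStaticName() *symbol {
	s := staticName()
	s.readonly = true
	s.contentAddressable = true
	return s
}

func main() {
	fmt.Printf("%+v\n%+v\n", *staticName(), *readonlyStaticName())
}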
@ -495,6 +506,7 @@ const (
// fixedlit handles struct, array, and slice literals.
// TODO: expand documentation.
func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes) {
isBlank := var_ == nblank
var splitnode func(*Node) (a *Node, value *Node)
switch n.Op {
case OARRAYLIT, OSLICELIT:
@ -509,6 +521,9 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes)
}
a := nod(OINDEX, var_, nodintconst(k))
k++
if isBlank {
a = nblank
}
return a, r
}
case OSTRUCTLIT:
@ -516,7 +531,7 @@ func fixedlit(ctxt initContext, kind initKind, n *Node, var_ *Node, init *Nodes)
if r.Op != OSTRUCTKEY {
Fatalf("fixedlit: rhs not OSTRUCTKEY: %v", r)
}
if r.Sym.IsBlank() {
if r.Sym.IsBlank() || isBlank {
return nblank, r.Left
}
setlineno(r)
@ -624,9 +639,10 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
mode := getdyn(n, true)
if mode&initConst != 0 && !isSmallSliceLit(n) {
vstat = staticname(t)
if ctxt == inInitFunction {
vstat.MarkReadonly()
vstat = readonlystaticname(t)
} else {
vstat = staticname(t)
}
fixedlit(ctxt, initKindStatic, n, vstat, init)
}
@ -770,10 +786,8 @@ func maplit(n *Node, m *Node, init *Nodes) {
dowidth(te)
// make and initialize static arrays
vstatk := staticname(tk)
vstatk.MarkReadonly()
vstate := staticname(te)
vstate.MarkReadonly()
vstatk := readonlystaticname(tk)
vstate := readonlystaticname(te)
datak := nod(OARRAYLIT, nil, nil)
datae := nod(OARRAYLIT, nil, nil)
@ -894,8 +908,7 @@ func anylit(n *Node, var_ *Node, init *Nodes) {
if var_.isSimpleName() && n.List.Len() > 4 {
// lay out static data
vstat := staticname(t)
vstat.MarkReadonly()
vstat := readonlystaticname(t)
ctxt := inInitFunction
if n.Op == OARRAYLIT {

View file

@ -10,6 +10,7 @@ import (
"html"
"os"
"sort"
"strings"
"bufio"
"bytes"
@ -295,7 +296,10 @@ func (s *state) emitOpenDeferInfo() {
// worker indicates which of the backend workers is doing the processing.
func buildssa(fn *Node, worker int) *ssa.Func {
name := fn.funcname()
printssa := name == ssaDump
printssa := false
if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", or a package.name e.g. "compress/gzip.(*Reader).Reset"
printssa = name == ssaDump || myimportpath+"."+name == ssaDump
}
var astBuf *bytes.Buffer
if printssa {
astBuf = &bytes.Buffer{}
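The new matching accepts either a bare function name or a package-qualified one. A sketch of the predicate in isolation (shouldDump is an illustrative name):

package main

import "fmt"

func shouldDump(pattern, pkgpath, name string) bool {
	return pattern != "" && (name == pattern || pkgpath+"."+name == pattern)
}

func main() {
	fmt.Println(shouldDump("(*Reader).Reset", "compress/gzip", "(*Reader).Reset"))               // true
	fmt.Println(shouldDump("compress/gzip.(*Reader).Reset", "compress/gzip", "(*Reader).Reset")) // true
	fmt.Println(shouldDump("Reset", "compress/gzip", "(*Reader).Reset"))                         // false
}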
@ -329,8 +333,8 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.f.Config = ssaConfig
s.f.Cache = &ssaCaches[worker]
s.f.Cache.Reset()
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
s.f.Name = name
s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
s.f.PrintOrHtmlSSA = printssa
if fn.Func.Pragma&Nosplit != 0 {
s.f.NoSplit = true
@ -338,6 +342,10 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.panics = map[funcLine]*ssa.Block{}
s.softFloat = s.config.SoftFloat
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
s.f.Entry.Pos = fn.Pos
if printssa {
s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDumpFile, s.f, ssaDumpCFG)
// TODO: generate and print a mapping from nodes to values and blocks
@ -345,9 +353,6 @@ func buildssa(fn *Node, worker int) *ssa.Func {
s.f.HTMLWriter.WriteAST("AST", astBuf)
}
// Allocate starting block
s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
// Allocate starting values
s.labels = map[string]*ssaLabel{}
s.labeledNodes = map[*Node]*ssaLabel{}
@ -647,6 +652,8 @@ type state struct {
lastDeferExit *ssa.Block // Entry block of last defer exit code we generated
lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
lastDeferCount int // Number of defers encountered at that point
prevCall *ssa.Value // the previous call; use this to tie results to the call op.
}
type funcLine struct {
@ -801,6 +808,11 @@ func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.
return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
}
// newValue2A adds a new value with two arguments and an aux value to the current block.
func (s *state) newValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value {
return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
}
// newValue2Apos adds a new value with two arguments and an aux value to the current block.
// isStmt determines whether the created values may be a statement or not
// (i.e., false means never, yes means maybe).
@ -1067,7 +1079,7 @@ func (s *state) stmt(n *Node) {
fallthrough
case OCALLMETH, OCALLINTER:
s.call(n, callNormal)
s.callResult(n, callNormal)
if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
@ -1099,10 +1111,10 @@ func (s *state) stmt(n *Node) {
if n.Esc == EscNever {
d = callDeferStack
}
s.call(n.Left, d)
s.callResult(n.Left, d)
}
case OGO:
s.call(n.Left, callGo)
s.callResult(n.Left, callGo)
case OAS2DOTTYPE:
res, resok := s.dottype(n.Right, true)
@ -2109,7 +2121,7 @@ func (s *state) expr(n *Node) *ssa.Value {
}
// unsafe.Pointer <--> *T
if to.Etype == TUNSAFEPTR && from.IsPtrShaped() || from.Etype == TUNSAFEPTR && to.IsPtrShaped() {
if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
return v
}
@ -2545,8 +2557,23 @@ func (s *state) expr(n *Node) *ssa.Value {
return s.addr(n.Left)
case ORESULT:
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall {
// Do the old thing
addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
return s.load(n.Type, addr)
}
which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
if which == -1 {
// Do the old thing // TODO: Panic instead.
addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
return s.load(n.Type, addr)
}
if canSSAType(n.Type) {
return s.newValue1I(ssa.OpSelectN, n.Type, which, s.prevCall)
} else {
addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type), which, s.prevCall)
return s.load(n.Type, addr)
}
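
The ResultForOffset lookup relied on here, reduced to a self-contained sketch; the type and field names are illustrative assumptions, not the real AuxCall layout.

type param struct{ offset int32 }

type auxCall struct{ results []param }

// resultForOffset returns the index of the result that starts at the given
// stack offset, or -1 when none does; the -1 case above falls back to the
// old SP-relative load.
func (a *auxCall) resultForOffset(off int64) int64 {
	for i, r := range a.results {
		if int64(r.offset) == off {
			return int64(i)
		}
	}
	return -1
}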
case ODEREF:
p := s.exprPtr(n.Left, n.Bounded(), n.Pos)
@ -2706,8 +2733,7 @@ func (s *state) expr(n *Node) *ssa.Value {
fallthrough
case OCALLINTER, OCALLMETH:
a := s.call(n, callNormal)
return s.load(n.Type, a)
return s.callResult(n, callNormal)
case OGETG:
return s.newValue1(ssa.OpGetG, n.Type, s.mem())
@ -3580,8 +3606,7 @@ func init() {
addF("math", "FMA",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
a := s.call(n, callNormal)
s.vars[n] = s.load(types.Types[TFLOAT64], a)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[TFLOAT64])
}
v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[TBOOL], x86HasFMA)
@ -3602,8 +3627,7 @@ func init() {
// Call the pure Go version.
s.startBlock(bFalse)
a := s.call(n, callNormal)
s.vars[n] = s.load(types.Types[TFLOAT64], a)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
@ -3614,8 +3638,7 @@ func init() {
addF("math", "FMA",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
if !s.config.UseFMA {
a := s.call(n, callNormal)
s.vars[n] = s.load(types.Types[TFLOAT64], a)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
return s.variable(n, types.Types[TFLOAT64])
}
addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), armHasVFPv4, s.sb)
@ -3637,8 +3660,7 @@ func init() {
// Call the pure Go version.
s.startBlock(bFalse)
a := s.call(n, callNormal)
s.vars[n] = s.load(types.Types[TFLOAT64], a)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
@ -3667,8 +3689,7 @@ func init() {
// Call the pure Go version.
s.startBlock(bFalse)
a := s.call(n, callNormal)
s.vars[n] = s.load(types.Types[TFLOAT64], a)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
@ -3878,8 +3899,7 @@ func init() {
// Call the pure Go version.
s.startBlock(bFalse)
a := s.call(n, callNormal)
s.vars[n] = s.load(types.Types[TINT], a)
s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
s.endBlock().AddEdgeTo(bEnd)
// Merge results.
@ -4206,7 +4226,7 @@ func (s *state) openDeferSave(n *Node, t *types.Type, val *ssa.Value) *ssa.Value
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type), argTemp, s.sp, s.mem(), false)
}
if types.Haspointers(t) {
if t.HasPointers() {
// Since we may use this argTemp during exit depending on the
// deferBits, we must define it unconditionally on entry.
// Therefore, we must make sure it is zeroed out in the entry
@ -4271,16 +4291,20 @@ func (s *state) openDeferExit() {
argStart := Ctxt.FixedFrameSize()
fn := r.n.Left
stksize := fn.Type.ArgWidth()
var ACArgs []ssa.Param
var ACResults []ssa.Param
if r.rcvr != nil {
// rcvr in case of OCALLINTER
v := s.load(r.rcvr.Type.Elem(), r.rcvr)
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
s.store(types.Types[TUINTPTR], addr, v)
}
for j, argAddrVal := range r.argVals {
f := getParam(r.n, j)
pt := types.NewPtr(f.Type)
addr := s.constOffPtrSP(pt, argStart+f.Offset)
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart + f.Offset)})
if !canSSAType(f.Type) {
s.move(f.Type, addr, argAddrVal)
} else {
@ -4293,10 +4317,10 @@ func (s *state) openDeferExit() {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
codeptr := s.rawLoad(types.Types[TUINTPTR], v)
call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, v, s.mem())
call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, v, s.mem())
} else {
// Do a static call if the original call was a static function or method
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn.Sym.Linksym(), s.mem())
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults), s.mem())
}
call.AuxInt = stksize
s.vars[&memVar] = call
@ -4308,39 +4332,59 @@ func (s *state) openDeferExit() {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
if r.rcvrNode != nil {
if types.Haspointers(r.rcvrNode.Type) {
if r.rcvrNode.Type.HasPointers() {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
}
}
for _, argNode := range r.argNodes {
if types.Haspointers(argNode.Type) {
if argNode.Type.HasPointers() {
s.vars[&memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
}
}
if i == len(s.openDefers)-1 {
// Record the call of the first defer. This will be used
// to set liveness info for the deferreturn (which is also
// used for any location that causes a runtime panic)
s.f.LastDeferExit = call
}
s.endBlock()
s.startBlock(bEnd)
}
}
func (s *state) callResult(n *Node, k callKind) *ssa.Value {
return s.call(n, k, false)
}
func (s *state) callAddr(n *Node, k callKind) *ssa.Value {
return s.call(n, k, true)
}
// Calls the function n using the specified call type.
// Returns the result of the call (nil if there is none): the value itself
// when returnResultAddr is false, or the address of the return value when
// returnResultAddr is true.
func (s *state) call(n *Node, k callKind) *ssa.Value {
func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value {
s.prevCall = nil
var sym *types.Sym // target symbol (if static)
var closure *ssa.Value // ptr to closure to run (if dynamic)
var codeptr *ssa.Value // ptr to target code (if dynamic)
var rcvr *ssa.Value // receiver to set
fn := n.Left
var ACArgs []ssa.Param
var ACResults []ssa.Param
var callArgs []*ssa.Value
res := n.Left.Type.Results()
if k == callNormal {
nf := res.NumFields()
for i := 0; i < nf; i++ {
fp := res.Field(i)
ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + Ctxt.FixedFrameSize())})
}
}
testLateExpansion := false
switch n.Op {
case OCALLFUNC:
if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
sym = fn.Sym
if !returnResultAddr && strings.Contains(sym.Name, "testLateExpansion") {
testLateExpansion = true
}
break
}
closure = s.expr(fn)
@ -4355,6 +4399,9 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
}
if k == callNormal {
sym = fn.Sym
if !returnResultAddr && strings.Contains(sym.Name, "testLateExpansion") {
testLateExpansion = true
}
break
}
closure = s.getMethodClosure(fn)
@ -4434,10 +4481,12 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
// Call runtime.deferprocStack with pointer to _defer record.
arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize())
s.store(types.Types[TUINTPTR], arg0, addr)
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, deferprocStack, s.mem())
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(Ctxt.FixedFrameSize())})
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults), s.mem())
if stksize < int64(Widthptr) {
// We need room for both the call to deferprocStack and the call to
// the deferred function.
// TODO Revisit this if/when we pass args in registers.
stksize = int64(Widthptr)
}
call.AuxInt = stksize
@ -4449,10 +4498,20 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
if k != callNormal {
// Write argsize and closure (args to newproc/deferproc).
argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINT32], Offset: int32(argStart)})
if testLateExpansion {
callArgs = append(callArgs, argsize)
} else {
addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
s.store(types.Types[TUINT32], addr, argsize)
addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
}
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart) + int32(Widthptr)})
if testLateExpansion {
callArgs = append(callArgs, closure)
} else {
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
s.store(types.Types[TUINTPTR], addr, closure)
}
stksize += 2 * int64(Widthptr)
argStart += 2 * int64(Widthptr)
}
@ -4460,28 +4519,39 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
// Set receiver (for interface calls).
if rcvr != nil {
addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)})
if testLateExpansion {
callArgs = append(callArgs, rcvr)
} else {
s.store(types.Types[TUINTPTR], addr, rcvr)
}
}
// Write args.
t := n.Left.Type
args := n.Rlist.Slice()
if n.Op == OCALLMETH {
f := t.Recv()
s.storeArg(args[0], f.Type, argStart+f.Offset)
ACArg, arg := s.putArg(args[0], f.Type, argStart+f.Offset, testLateExpansion)
ACArgs = append(ACArgs, ACArg)
callArgs = append(callArgs, arg)
args = args[1:]
}
for i, n := range args {
f := t.Params().Field(i)
s.storeArg(n, f.Type, argStart+f.Offset)
ACArg, arg := s.putArg(n, f.Type, argStart+f.Offset, testLateExpansion)
ACArgs = append(ACArgs, ACArg)
callArgs = append(callArgs, arg)
}
callArgs = append(callArgs, s.mem())
// call target
switch {
case k == callDefer:
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, deferproc, s.mem())
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferproc, ACArgs, ACResults), s.mem())
case k == callGo:
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, newproc, s.mem())
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(newproc, ACArgs, ACResults), s.mem())
case closure != nil:
// rawLoad because loading the code pointer from a
// closure is always safe, but IsSanitizerSafeAddr
@ -4489,17 +4559,35 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
// critical that we not clobber any arguments already
// stored onto the stack.
codeptr = s.rawLoad(types.Types[TUINTPTR], closure)
call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem())
call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem())
case codeptr != nil:
call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem())
call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem())
case sym != nil:
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem())
if testLateExpansion {
var tys []*types.Type
aux := ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults)
for i := int64(0); i < aux.NResults(); i++ {
tys = append(tys, aux.TypeOfResult(i))
}
tys = append(tys, types.TypeMem)
call = s.newValue0A(ssa.OpStaticLECall, types.NewResults(tys), aux)
call.AddArgs(callArgs...)
} else {
call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem())
}
default:
s.Fatalf("bad call type %v %v", n.Op, n)
}
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
}
if testLateExpansion {
s.prevCall = call
s.vars[&memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
} else {
s.vars[&memVar] = call
}
// Insert OVARLIVE nodes
s.stmtList(n.Nbody)
// Finish block for defers
if k == callDefer || k == callDeferStack {
@ -4517,15 +4605,21 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
s.startBlock(bNext)
}
res := n.Left.Type.Results()
if res.NumFields() == 0 || k != callNormal {
// call has no return value. Continue with the next statement.
return nil
}
fp := res.Field(0)
if returnResultAddr {
return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize())
}
if testLateExpansion {
return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
}
return s.load(n.Type, s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()))
}
// maybeNilCheckClosure checks if a nil check of a closure is needed in some
// architecture-dependent situations and, if so, emits the nil check.
func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
@ -4621,7 +4715,17 @@ func (s *state) addr(n *Node) *ssa.Value {
}
case ORESULT:
// load return from callee
if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall {
return s.constOffPtrSP(t, n.Xoffset)
}
which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset)
if which == -1 {
// Do the old thing // TODO: Panic instead.
return s.constOffPtrSP(t, n.Xoffset)
}
x := s.newValue1I(ssa.OpSelectNAddr, t, which, s.prevCall)
return x
case OINDEX:
if n.Left.Type.IsSlice() {
a := s.expr(n.Left)
@ -4652,7 +4756,7 @@ func (s *state) addr(n *Node) *ssa.Value {
addr := s.addr(n.Left)
return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
case OCALLFUNC, OCALLINTER, OCALLMETH:
return s.call(n, callNormal)
return s.callAddr(n, callNormal)
case ODOTTYPE:
v, _ := s.dottype(n, false)
if v.Op != ssa.OpLoad {
@ -4911,20 +5015,32 @@ func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
s.prevCall = nil
// Write args to the stack
off := Ctxt.FixedFrameSize()
var ACArgs []ssa.Param
var ACResults []ssa.Param
for _, arg := range args {
t := arg.Type
off = Rnd(off, t.Alignment())
ptr := s.constOffPtrSP(t.PtrTo(), off)
size := t.Size()
ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)})
s.store(t, ptr, arg)
off += size
}
off = Rnd(off, int64(Widthreg))
// Accumulate result types and offsets
offR := off
for _, t := range results {
offR = Rnd(offR, t.Alignment())
ACResults = append(ACResults, ssa.Param{Type: t, Offset: int32(offR)})
offR += t.Size()
}
// Issue call
call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn, s.mem())
call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn, ACArgs, ACResults), s.mem())
s.vars[&memVar] = call
if !returns {
@ -4959,7 +5075,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
s.instrument(t, left, true)
if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) {
if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
// Known to not have write barrier. Store the whole type.
s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
return
@ -4971,7 +5087,7 @@ func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask,
// TODO: if the writebarrier pass knows how to reorder stores,
// we can do a single store here as long as skip==0.
s.storeTypeScalars(t, left, right, skip)
if skip&skipPtr == 0 && types.Haspointers(t) {
if skip&skipPtr == 0 && t.HasPointers() {
s.storeTypePtrs(t, left, right)
}
}
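
The barrier decision above compresses to a small predicate; a sketch, not compiler code:

// A store can skip the write barrier when the stored type is pointer-free,
// or when the destination is provably a stack address.
func needsWriteBarrier(typeHasPointers, destIsStackAddr bool) bool {
	return typeHasPointers && !destIsStackAddr
}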
@ -5043,7 +5159,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
n := t.NumFields()
for i := 0; i < n; i++ {
ft := t.FieldType(i)
if !types.Haspointers(ft) {
if !ft.HasPointers() {
continue
}
addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
@ -5059,9 +5175,22 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
}
}
func (s *state) storeArg(n *Node, t *types.Type, off int64) {
// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call.
// If forLateExpandedCall is true, it returns the argument value to pass to the call operation.
// If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil.
func (s *state) putArg(n *Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
var a *ssa.Value
if forLateExpandedCall {
if !canSSAType(t) {
a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
} else {
a = s.expr(n)
}
} else {
s.storeArgWithBase(n, t, s.sp, off)
}
return ssa.Param{Type: t, Offset: int32(off)}, a
}
func (s *state) storeArgWithBase(n *Node, t *types.Type, base *ssa.Value, off int64) {
pt := types.NewPtr(t)
@ -5807,11 +5936,6 @@ type SSAGenState struct {
// wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
OnWasmStackSkipped int
// Liveness index for the first function call in the final defer exit code
// path that we generated. All defer functions and args should be live at
// this point. This will be used to set the liveness for the deferreturn.
lastDeferLiveness LivenessIndex
}
// Prog appends a new Prog.
@ -6056,12 +6180,6 @@ func genssa(f *ssa.Func, pp *Progs) {
// instruction.
s.pp.nextLive = s.livenessMap.Get(v)
// Remember the liveness index of the first defer call of
// the last defer exit
if v.Block.Func.LastDeferExit != nil && v == v.Block.Func.LastDeferExit {
s.lastDeferLiveness = s.pp.nextLive
}
// Special case for first line in function; move it to the start.
if firstPos != src.NoXPos {
s.SetPos(firstPos)
@ -6122,7 +6240,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// When doing open-coded defers, generate a disconnected call to
// deferreturn and a return. This will be used during panic
// recovery to unwind the stack and return to the runtime.
s.pp.nextLive = s.lastDeferLiveness
s.pp.nextLive = s.livenessMap.deferreturn
gencallret(pp, Deferreturn)
}
@ -6195,7 +6313,7 @@ func genssa(f *ssa.Func, pp *Progs) {
// Resolve branches, and relax DefaultStmt into NotStmt
for _, br := range s.Branches {
br.P.To.Val = s.bstart[br.B.ID]
br.P.To.SetTarget(s.bstart[br.B.ID])
if br.P.Pos.IsStmt() != src.PosIsStmt {
br.P.Pos = br.P.Pos.WithNotStmt()
} else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
@ -6366,6 +6484,9 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
}
// Add symbol's offset from its base register.
switch n := v.Aux.(type) {
case *ssa.AuxCall:
a.Name = obj.NAME_EXTERN
a.Sym = n.Fn
case *obj.LSym:
a.Name = obj.NAME_EXTERN
a.Sym = n
@ -6552,10 +6673,10 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
} else {
p.Pos = v.Pos.WithNotStmt()
}
if sym, ok := v.Aux.(*obj.LSym); ok {
if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = sym
p.To.Sym = sym.Fn
} else {
// TODO(mdempsky): Can these differences be eliminated?
switch thearch.LinkArch.Family {
@ -6578,12 +6699,14 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) {
idx := s.livenessMap.Get(v)
if !idx.StackMapValid() {
// See Liveness.hasStackMap.
if sym, _ := v.Aux.(*obj.LSym); !(sym == typedmemclr || sym == typedmemmove) {
if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == typedmemclr || sym.Fn == typedmemmove) {
Fatalf("missing stack map index for %v", v.LongString())
}
}
if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn {
call, ok := v.Aux.(*ssa.AuxCall)
if ok && call.Fn == Deferreturn {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
@ -6595,11 +6718,11 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) {
thearch.Ginsnopdefer(s.pp)
}
if sym, ok := v.Aux.(*obj.LSym); ok {
if ok {
// Record call graph information for nowritebarrierrec
// analysis.
if nowritebarrierrecCheck != nil {
nowritebarrierrecCheck.recordCall(s.pp.curfn, sym, v.Pos)
nowritebarrierrecCheck.recordCall(s.pp.curfn, call.Fn, v.Pos)
}
}
@ -6879,6 +7002,10 @@ func (e *ssafn) SetWBPos(pos src.XPos) {
e.curfn.Func.setWBPos(pos)
}
func (e *ssafn) MyImportPath() string {
return myimportpath
}
func (n *Node) Typ() *types.Type {
return n.Type
}


@ -271,13 +271,6 @@ func autolabel(prefix string) *types.Sym {
return lookupN(prefix, int(n))
}
func restrictlookup(name string, pkg *types.Pkg) *types.Sym {
if !types.IsExported(name) && pkg != localpkg {
yyerror("cannot refer to unexported name %s.%s", pkg.Name, name)
}
return pkg.Lookup(name)
}
// find all the exported symbols in package opkg
// and make them available in the current package
func importdot(opkg *types.Pkg, pack *Node) {
@ -696,14 +689,14 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
// (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
if why != nil {
*why = fmt.Sprintf(":\n\t%v is go:notinheap, but %v is not", dst.Elem(), src.Elem())
*why = fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
}
return OXXX
}
// (b) Disallow string to []T where T is go:notinheap.
if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Etype == types.Bytetype.Etype || dst.Elem().Etype == types.Runetype.Etype) {
if why != nil {
*why = fmt.Sprintf(":\n\t%v is go:notinheap", dst.Elem())
*why = fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
}
return OXXX
}
@ -788,12 +781,12 @@ func convertop(srcConstant bool, src, dst *types.Type, why *string) Op {
}
// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
if (src.IsPtr() || src.Etype == TUINTPTR) && dst.Etype == TUNSAFEPTR {
if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
return OCONVNOP
}
// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
if src.Etype == TUNSAFEPTR && (dst.IsPtr() || dst.Etype == TUINTPTR) {
if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
return OCONVNOP
}
@ -935,16 +928,20 @@ func (o Op) IsSlice3() bool {
return false
}
// slicePtrLen extracts the pointer and length from a slice.
// backingArrayPtrLen extracts the pointer and length from a slice or string.
// This constructs two nodes referring to n, so n must be a cheapexpr.
func (n *Node) slicePtrLen() (ptr, len *Node) {
func (n *Node) backingArrayPtrLen() (ptr, len *Node) {
var init Nodes
c := cheapexpr(n, &init)
if c != n || init.Len() != 0 {
Fatalf("slicePtrLen not cheap: %v", n)
Fatalf("backingArrayPtrLen not cheap: %v", n)
}
ptr = nod(OSPTR, n, nil)
if n.Type.IsString() {
ptr.Type = types.Types[TUINT8].PtrTo()
} else {
ptr.Type = n.Type.Elem().PtrTo()
}
len = nod(OLEN, n, nil)
len.Type = types.Types[TINT]
return ptr, len
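
A user-level analogue of backingArrayPtrLen, assuming the standard string and slice header layouts; illustrative only, since carrying a backing-array pointer around this way is not a pattern real code should copy.

import (
	"reflect"
	"unsafe"
)

// stringPtrLen extracts the byte pointer and length of a string.
func stringPtrLen(s string) (unsafe.Pointer, int) {
	h := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return unsafe.Pointer(h.Data), h.Len
}

// slicePtrLen extracts the element pointer and length of a byte slice.
func slicePtrLen(b []byte) (unsafe.Pointer, int) {
	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	return unsafe.Pointer(h.Data), h.Len
}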
@ -1550,7 +1547,6 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
tfn.List.Set(structargs(method.Type.Params(), true))
tfn.Rlist.Set(structargs(method.Type.Results(), false))
disableExport(newnam)
fn := dclfunc(newnam, tfn)
fn.Func.SetDupok(true)
@ -1623,7 +1619,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
escapeFuncs([]*Node{fn}, false)
Curfn = nil
funccompile(fn)
xtop = append(xtop, fn)
}
func paramNnames(ft *types.Type) []*Node {
@ -1638,8 +1634,7 @@ func hashmem(t *types.Type) *Node {
sym := Runtimepkg.Lookup("memhash")
n := newname(sym)
n.SetClass(PFUNC)
n.Sym.SetFunc(true)
setNodeNameFunc(n)
n.Type = functype(nil, []*Node{
anonfield(types.NewPtr(t)),
anonfield(types.Types[TUINTPTR]),


@ -142,7 +142,7 @@ const (
_, _ // second nodeInitorder bit
_, nodeHasBreak
_, nodeNoInline // used internally by inliner to indicate that a function call should not be inlined; set for OCALLFUNC and OCALLMETH only
_, nodeImplicit
_, nodeImplicit // implicit OADDR or ODEREF; ++/-- statement represented as OASOP; or ANDNOT lowered to OAND
_, nodeIsDDD // is the argument variadic
_, nodeDiag // already printed error about this
_, nodeColas // OAS resulting from :=
@ -359,7 +359,6 @@ const (
nameReadonly
nameByval // is the variable captured by value or by reference
nameNeedzero // if it contains pointers, needs to be zeroed on function entry
nameKeepalive // mark value live across unknown assembly call
nameAutoTemp // is the variable a temporary (implies no dwarf info. reset if escapes to heap)
nameUsed // for variable declared and not used error
nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original at n.Name.Defn
@ -376,7 +375,6 @@ func (n *Name) Captured() bool { return n.flags&nameCaptured != 0 }
func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 }
func (n *Name) Byval() bool { return n.flags&nameByval != 0 }
func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 }
func (n *Name) Keepalive() bool { return n.flags&nameKeepalive != 0 }
func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 }
func (n *Name) Used() bool { return n.flags&nameUsed != 0 }
func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 }
@ -392,7 +390,6 @@ func (n *Name) SetCaptured(b bool) { n.flags.set(nameCaptured, b) }
func (n *Name) SetReadonly(b bool) { n.flags.set(nameReadonly, b) }
func (n *Name) SetByval(b bool) { n.flags.set(nameByval, b) }
func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) }
func (n *Name) SetKeepalive(b bool) { n.flags.set(nameKeepalive, b) }
func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) }
func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) }
func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) }
@ -692,6 +689,7 @@ const (
// Prior to walk, they are: Left(List), where List is all regular arguments.
// After walk, List is a series of assignments to temporaries,
// and Rlist is an updated set of arguments.
// Nbody is all OVARLIVE nodes that are attached to OCALLxxx.
// TODO(josharian/khr): Use Ninit instead of List for the assignments to temporaries. See CL 114797.
OCALLFUNC // Left(List/Rlist) (function call f(args))
OCALLMETH // Left(List/Rlist) (direct method call x.Method(args))
@ -718,7 +716,7 @@ const (
ODCLCONST // const pi = 3.14
ODCLTYPE // type Int int or type Int = int
ODELETE // delete(Left, Right)
ODELETE // delete(List)
ODOT // Left.Sym (Left is of struct type)
ODOTPTR // Left.Sym (Left is of pointer to struct type)
ODOTMETH // Left.Sym (Left is non-interface, Right is method name)


@ -151,8 +151,8 @@ var _typekind = []string{
}
func typekind(t *types.Type) string {
if t.IsSlice() {
return "slice"
if t.IsUntyped() {
return fmt.Sprintf("%v", t)
}
et := t.Etype
if int(et) < len(_typekind) {
@ -471,10 +471,10 @@ func typecheck1(n *Node, top int) (res *Node) {
return n
}
if l.Type.NotInHeap() {
yyerror("go:notinheap map key not allowed")
yyerror("incomplete (or unallocatable) map key not allowed")
}
if r.Type.NotInHeap() {
yyerror("go:notinheap map value not allowed")
yyerror("incomplete (or unallocatable) map value not allowed")
}
setTypeNode(n, types.NewMap(l.Type, r.Type))
@ -491,7 +491,7 @@ func typecheck1(n *Node, top int) (res *Node) {
return n
}
if l.Type.NotInHeap() {
yyerror("chan of go:notinheap type not allowed")
yyerror("chan of incomplete (or unallocatable) type not allowed")
}
setTypeNode(n, types.NewChan(l.Type, n.TChanDir()))
@ -623,10 +623,29 @@ func typecheck1(n *Node, top int) (res *Node) {
// no defaultlit for left
// the outer context gives the type
n.Type = l.Type
if (l.Type == types.Idealfloat || l.Type == types.Idealcomplex) && r.Op == OLITERAL {
n.Type = types.Idealint
}
break
}
// For "x == x && len(s)", it's better to report that "len(s)" (type int)
// can't be used with "&&" than to report that "x == x" (type untyped bool)
// can't be converted to int (see issue #41500).
if n.Op == OANDAND || n.Op == OOROR {
if !n.Left.Type.IsBoolean() {
yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Left.Type))
n.Type = nil
return n
}
if !n.Right.Type.IsBoolean() {
yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(n.Right.Type))
n.Type = nil
return n
}
}
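
The shape from issue #41500 that motivates this early check; the program below is rejected either way, but the check makes the error blame len(s) (type int) rather than complaining that the untyped-bool comparison cannot be converted to int.

package p

func f(x int, s string) bool {
	return x == x && len(s) // invalid operation: operator && not defined on int
}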
// ideal mixed with non-ideal
l, r = defaultlit2(l, r, false)
@ -713,7 +732,10 @@ func typecheck1(n *Node, top int) (res *Node) {
}
}
if !okfor[op][et] {
if t.Etype == TIDEAL {
t = mixUntyped(l.Type, r.Type)
}
if dt := defaultType(t); !okfor[op][dt.Etype] {
yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
n.Type = nil
return n
@ -753,15 +775,7 @@ func typecheck1(n *Node, top int) (res *Node) {
}
}
t = l.Type
if iscmp[n.Op] {
// TIDEAL includes complex constant, but only OEQ and ONE are defined for complex,
// so check that the n.op is available for complex here before doing evconst.
if !okfor[n.Op][TCOMPLEX128] && (Isconst(l, CTCPLX) || Isconst(r, CTCPLX)) {
yyerror("invalid operation: %v (operator %v not defined on untyped complex)", n, n.Op)
n.Type = nil
return n
}
evconst(n)
t = types.Idealbool
if n.Op != OLITERAL {
@ -808,8 +822,8 @@ func typecheck1(n *Node, top int) (res *Node) {
n.Type = nil
return n
}
if !okfor[n.Op][t.Etype] {
yyerror("invalid operation: %v %v", n.Op, t)
if !okfor[n.Op][defaultType(t).Etype] {
yyerror("invalid operation: %v (operator %v not defined on %s)", n, n.Op, typekind(t))
n.Type = nil
return n
}
@ -1678,7 +1692,7 @@ func typecheck1(n *Node, top int) (res *Node) {
}
var why string
n.Op = convertop(n.Left.Op == OLITERAL, t, n.Type, &why)
if n.Op == 0 {
if n.Op == OXXX {
if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() {
yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why)
n.SetDiag(true)
@ -2068,12 +2082,6 @@ func typecheck1(n *Node, top int) (res *Node) {
ok |= ctxStmt
n.Left = typecheck(n.Left, ctxType)
checkwidth(n.Left.Type)
if n.Left.Type != nil && n.Left.Type.NotInHeap() && n.Left.Name.Param.Pragma&NotInHeap == 0 {
// The type contains go:notinheap types, so it
// must be marked as such (alternatively, we
// could silently propagate go:notinheap).
yyerror("type %v must be go:notinheap", n.Left.Type)
}
}
t := n.Type
@ -2667,7 +2675,7 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes,
return
notenough:
if n == nil || !n.Diag() {
if n == nil || (!n.Diag() && n.Type != nil) {
details := errorDetails(nl, tstruct, isddd)
if call != nil {
// call is the expression being called, not the overall call.
@ -2708,13 +2716,13 @@ func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string {
return ""
}
}
return fmt.Sprintf("\n\thave %s\n\twant %v", nl.retsigerr(isddd), tstruct)
return fmt.Sprintf("\n\thave %s\n\twant %v", nl.sigerr(isddd), tstruct)
}
// sigrepr is a type's representation to the outside world,
// in string representations of return signatures
// e.g in error messages about wrong arguments to return.
func sigrepr(t *types.Type) string {
func sigrepr(t *types.Type, isddd bool) string {
switch t {
case types.Idealstring:
return "string"
@ -2729,26 +2737,29 @@ func sigrepr(t *types.Type) string {
return "number"
}
// Turn a []T... argument into ...T for a clearer error message.
if isddd {
if !t.IsSlice() {
Fatalf("bad type for ... argument: %v", t)
}
return "..." + t.Elem().String()
}
return t.String()
}
// retsigerr returns the signature of the types
// at the respective return call site of a function.
func (nl Nodes) retsigerr(isddd bool) string {
// sigerr returns the signature of the types at the call or return.
func (nl Nodes) sigerr(isddd bool) string {
if nl.Len() < 1 {
return "()"
}
var typeStrings []string
for _, n := range nl.Slice() {
typeStrings = append(typeStrings, sigrepr(n.Type))
for i, n := range nl.Slice() {
isdddArg := isddd && i == nl.Len()-1
typeStrings = append(typeStrings, sigrepr(n.Type, isdddArg))
}
ddd := ""
if isddd {
ddd = "..."
}
return fmt.Sprintf("(%s%s)", strings.Join(typeStrings, ", "), ddd)
return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
}
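
A simplified analogue of the formatting above, with plain strings standing in for *types.Type; the final argument of a ... call renders as ...T instead of []T.

import "strings"

func formatSig(typeStrings []string, isddd bool) string {
	if len(typeStrings) == 0 {
		return "()"
	}
	if isddd {
		last := len(typeStrings) - 1
		typeStrings[last] = "..." + strings.TrimPrefix(typeStrings[last], "[]")
	}
	return "(" + strings.Join(typeStrings, ", ") + ")"
}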
// type check composite
@ -3135,9 +3146,14 @@ func checkassign(stmt *Node, n *Node) {
return
}
if n.Op == ODOT && n.Left.Op == OINDEXMAP {
switch {
case n.Op == ODOT && n.Left.Op == OINDEXMAP:
yyerror("cannot assign to struct field %v in map", n)
} else {
case (n.Op == OINDEX && n.Left.Type.IsString()) || n.Op == OSLICESTR:
yyerror("cannot assign to %v (strings are immutable)", n)
case n.Op == OLITERAL && n.Sym != nil && n.isGoConst():
yyerror("cannot assign to %v (declared const)", n)
default:
yyerror("cannot assign to %v", n)
}
n.Type = nil


@ -6,6 +6,7 @@ package gc
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/sys"
"encoding/binary"
@ -230,6 +231,13 @@ func walkstmt(n *Node) *Node {
case OCOPY:
n.Left = copyany(n.Left, &n.Ninit, true)
case OCALLFUNC, OCALLMETH, OCALLINTER:
if n.Left.Nbody.Len() > 0 {
n.Left = wrapCall(n.Left, &n.Ninit)
} else {
n.Left = walkexpr(n.Left, &n.Ninit)
}
default:
n.Left = walkexpr(n.Left, &n.Ninit)
}
@ -380,9 +388,9 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
switch {
case from.Size() == 2 && from.Align == 2:
return "convT16", false
case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from):
case from.Size() == 4 && from.Align == 4 && !from.HasPointers():
return "convT32", false
case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from):
case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !from.HasPointers():
return "convT64", false
}
if sc := from.SoleComponent(); sc != nil {
@ -396,12 +404,12 @@ func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
switch tkind {
case 'E':
if !types.Haspointers(from) {
if !from.HasPointers() {
return "convT2Enoptr", true
}
return "convT2E", true
case 'I':
if !types.Haspointers(from) {
if !from.HasPointers() {
return "convT2Inoptr", true
}
return "convT2I", true
@ -640,7 +648,7 @@ opswitch:
// x = append(...)
r := n.Right
if r.Type.Elem().NotInHeap() {
yyerror("%v is go:notinheap; heap allocation disallowed", r.Type.Elem())
yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", r.Type.Elem())
}
switch {
case isAppendOfMake(r):
@ -797,6 +805,10 @@ opswitch:
fromType := n.Left.Type
toType := n.Type
if !fromType.IsInterface() {
markTypeUsedInInterface(fromType)
}
// typeword generates the type word of the interface value.
typeword := func() *Node {
if toType.IsEmptyInterface() {
@ -949,11 +961,11 @@ opswitch:
case OCONV, OCONVNOP:
n.Left = walkexpr(n.Left, init)
if n.Op == OCONVNOP && checkPtr(Curfn, 1) {
if n.Type.IsPtr() && n.Left.Type.Etype == TUNSAFEPTR { // unsafe.Pointer to *T
if n.Type.IsPtr() && n.Left.Type.IsUnsafePtr() { // unsafe.Pointer to *T
n = walkCheckPtrAlignment(n, init, nil)
break
}
if n.Type.Etype == TUNSAFEPTR && n.Left.Type.Etype == TUINTPTR { // uintptr to unsafe.Pointer
if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() { // uintptr to unsafe.Pointer
n = walkCheckPtrArithmetic(n, init)
break
}
@ -968,6 +980,7 @@ opswitch:
case OANDNOT:
n.Left = walkexpr(n.Left, init)
n.Op = OAND
n.SetImplicit(true) // for walkCheckPtrArithmetic
n.Right = nod(OBITNOT, n.Right, nil)
n.Right = typecheck(n.Right, ctxExpr)
n.Right = walkexpr(n.Right, init)
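
The rewrite uses the identity a &^ b == a & ^b; the Implicit mark is what lets walkCheckPtrArithmetic below recognize the resulting OAND as a lowered OANDNOT. The identity, stated as Go:

func andnot(a, b uint32) uint32  { return a &^ b }
func lowered(a, b uint32) uint32 { return a & ^b }

// For all a, b: andnot(a, b) == lowered(a, b).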
@ -1117,7 +1130,7 @@ opswitch:
n.List.SetSecond(walkexpr(n.List.Second(), init))
case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
checkSlice := checkPtr(Curfn, 1) && n.Op == OSLICE3ARR && n.Left.Op == OCONVNOP && n.Left.Left.Type.Etype == TUNSAFEPTR
checkSlice := checkPtr(Curfn, 1) && n.Op == OSLICE3ARR && n.Left.Op == OCONVNOP && n.Left.Left.Type.IsUnsafePtr()
if checkSlice {
n.Left.Left = walkexpr(n.Left.Left, init)
} else {
@ -1150,6 +1163,9 @@ opswitch:
}
case ONEW:
if n.Type.Elem().NotInHeap() {
yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type.Elem())
}
if n.Esc == EscNone {
if n.Type.Elem().Width >= maxImplicitStackVarSize {
Fatalf("large ONEW with EscNone: %v", n)
@ -1318,6 +1334,9 @@ opswitch:
l = r
}
t := n.Type
if t.Elem().NotInHeap() {
yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
if n.Esc == EscNone {
if !isSmallMakeSlice(n) {
Fatalf("non-small OMAKESLICE with EscNone: %v", n)
@ -1359,10 +1378,6 @@ opswitch:
// When len and cap can fit into int, use makeslice instead of
// makeslice64, which is faster and shorter on 32 bit platforms.
if t.Elem().NotInHeap() {
yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem())
}
len, cap := l, r
fnname := "makeslice64"
@ -1397,14 +1412,14 @@ opswitch:
t := n.Type
if t.Elem().NotInHeap() {
Fatalf("%v is go:notinheap; heap allocation disallowed", t.Elem())
yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
}
length := conv(n.Left, types.Types[TINT])
copylen := nod(OLEN, n.Right, nil)
copyptr := nod(OSPTR, n.Right, nil)
if !types.Haspointers(t.Elem()) && n.Bounded() {
if !t.Elem().HasPointers() && n.Bounded() {
// When len(to)==len(from) and elements have no pointers:
// replace make+copy with runtime.mallocgc+runtime.memmove.
@ -1469,7 +1484,7 @@ opswitch:
} else {
// slicebytetostring(*[32]byte, ptr *byte, n int) string
n.Left = cheapexpr(n.Left, init)
ptr, len := n.Left.slicePtrLen()
ptr, len := n.Left.backingArrayPtrLen()
n = mkcall("slicebytetostring", n.Type, init, a, ptr, len)
}
@ -1482,7 +1497,7 @@ opswitch:
}
// slicebytetostringtmp(ptr *byte, n int) string
n.Left = cheapexpr(n.Left, init)
ptr, len := n.Left.slicePtrLen()
ptr, len := n.Left.backingArrayPtrLen()
n = mkcall("slicebytetostringtmp", n.Type, init, ptr, len)
case OSTR2BYTES:
@ -1551,8 +1566,7 @@ opswitch:
if isStaticCompositeLiteral(n) && !canSSAType(n.Type) {
// n can be directly represented in the read-only data section.
// Make direct reference to the static data. See issue 12841.
vstat := staticname(n.Type)
vstat.MarkReadonly()
vstat := readonlystaticname(n.Type)
fixedlit(inInitFunction, initKindStatic, n, vstat, init)
n = vstat
n = typecheck(n, ctxExpr)
@ -1605,6 +1619,12 @@ opswitch:
return n
}
// markTypeUsedInInterface marks that type t is converted to an interface.
// This information is used in the linker in dead method elimination.
func markTypeUsedInInterface(t *types.Type) {
typenamesym(t).Linksym().Set(obj.AttrUsedInIface, true)
}
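
The kind of conversion that triggers the mark, as source code; storing a concrete value into an interface is what must keep the type's methods alive through dead-method elimination.

type T struct{ x int }

var sink interface{}

func use(t T) {
	sink = t // T is converted to an interface; its type symbol gets marked.
}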
// rtconvfn returns the parameter and result types that will be used by a
// runtime function to convert from type src to type dst. The runtime function
// name can be derived from the names of the returned types.
@ -2001,9 +2021,6 @@ func walkprint(nn *Node, init *Nodes) *Node {
}
func callnew(t *types.Type) *Node {
if t.NotInHeap() {
yyerror("%v is go:notinheap; heap allocation disallowed", t)
}
dowidth(t)
n := nod(ONEWOBJ, typename(t), nil)
n.Type = types.NewPtr(t)
@ -2578,7 +2595,7 @@ func mapfast(t *types.Type) int {
}
switch algtype(t.Key()) {
case AMEM32:
if !t.Key().HasHeapPointer() {
if !t.Key().HasPointers() {
return mapfast32
}
if Widthptr == 4 {
@ -2586,7 +2603,7 @@ func mapfast(t *types.Type) int {
}
Fatalf("small pointer %v", t.Key())
case AMEM64:
if !t.Key().HasHeapPointer() {
if !t.Key().HasPointers() {
return mapfast64
}
if Widthptr == 8 {
@ -2733,7 +2750,7 @@ func appendslice(n *Node, init *Nodes) *Node {
nodes.Append(nod(OAS, s, nt))
var ncopy *Node
if elemtype.HasHeapPointer() {
if elemtype.HasPointers() {
// copy(s[len(l1):], l2)
nptr1 := nod(OSLICE, s, nil)
nptr1.Type = s.Type
@ -2747,36 +2764,25 @@ func appendslice(n *Node, init *Nodes) *Node {
// instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
fn := syslook("typedslicecopy")
fn = substArgTypes(fn, l1.Type.Elem(), l2.Type.Elem())
ptr1, len1 := nptr1.slicePtrLen()
ptr2, len2 := nptr2.slicePtrLen()
ptr1, len1 := nptr1.backingArrayPtrLen()
ptr2, len2 := nptr2.backingArrayPtrLen()
ncopy = mkcall1(fn, types.Types[TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
} else if instrumenting && !compiling_runtime {
// rely on runtime to instrument copy.
// rely on runtime to instrument:
// copy(s[len(l1):], l2)
// l2 can be a slice or string.
nptr1 := nod(OSLICE, s, nil)
nptr1.Type = s.Type
nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
nptr1 = cheapexpr(nptr1, &nodes)
nptr2 := l2
if l2.Type.IsString() {
// instantiate func slicestringcopy(toPtr *byte, toLen int, fr string) int
fn := syslook("slicestringcopy")
ptr, len := nptr1.slicePtrLen()
str := nod(OCONVNOP, nptr2, nil)
str.Type = types.Types[TSTRING]
ncopy = mkcall1(fn, types.Types[TINT], &nodes, ptr, len, str)
} else {
// instantiate func slicecopy(to any, fr any, wid uintptr) int
fn := syslook("slicecopy")
fn = substArgTypes(fn, l1.Type.Elem(), l2.Type.Elem())
ptr1, len1 := nptr1.slicePtrLen()
ptr2, len2 := nptr2.slicePtrLen()
ncopy = mkcall1(fn, types.Types[TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width))
}
ptr1, len1 := nptr1.backingArrayPtrLen()
ptr2, len2 := nptr2.backingArrayPtrLen()
fn := syslook("slicecopy")
fn = substArgTypes(fn, ptr1.Type.Elem(), ptr2.Type.Elem())
ncopy = mkcall1(fn, types.Types[TINT], &nodes, ptr1, len1, ptr2, len2, nodintconst(elemtype.Width))
} else {
// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil))
@ -2854,7 +2860,7 @@ func isAppendOfMake(n *Node) bool {
// s = s[:n]
// lptr := &l1[0]
// sptr := &s[0]
// if lptr == sptr || !hasPointers(T) {
// if lptr == sptr || !T.HasPointers() {
// // growslice did not clear the whole underlying array (or did not get called)
// hp := &s[len(l1)]
// hn := l2 * sizeof(T)
@ -2935,7 +2941,7 @@ func extendslice(n *Node, init *Nodes) *Node {
hn = conv(hn, types.Types[TUINTPTR])
clrname := "memclrNoHeapPointers"
hasPointers := types.Haspointers(elemtype)
hasPointers := elemtype.HasPointers()
if hasPointers {
clrname = "memclrHasPointers"
Curfn.Func.setWBPos(n.Pos)
@ -3071,32 +3077,29 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node {
// Also works if b is a string.
//
func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
if n.Left.Type.Elem().HasHeapPointer() {
if n.Left.Type.Elem().HasPointers() {
Curfn.Func.setWBPos(n.Pos)
fn := writebarrierfn("typedslicecopy", n.Left.Type.Elem(), n.Right.Type.Elem())
n.Left = cheapexpr(n.Left, init)
ptrL, lenL := n.Left.slicePtrLen()
ptrL, lenL := n.Left.backingArrayPtrLen()
n.Right = cheapexpr(n.Right, init)
ptrR, lenR := n.Right.slicePtrLen()
ptrR, lenR := n.Right.backingArrayPtrLen()
return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), ptrL, lenL, ptrR, lenR)
}
if runtimecall {
if n.Right.Type.IsString() {
fn := syslook("slicestringcopy")
// rely on runtime to instrument:
// copy(n.Left, n.Right)
// n.Right can be a slice or string.
n.Left = cheapexpr(n.Left, init)
ptr, len := n.Left.slicePtrLen()
str := nod(OCONVNOP, n.Right, nil)
str.Type = types.Types[TSTRING]
return mkcall1(fn, n.Type, init, ptr, len, str)
}
ptrL, lenL := n.Left.backingArrayPtrLen()
n.Right = cheapexpr(n.Right, init)
ptrR, lenR := n.Right.backingArrayPtrLen()
fn := syslook("slicecopy")
fn = substArgTypes(fn, n.Left.Type.Elem(), n.Right.Type.Elem())
n.Left = cheapexpr(n.Left, init)
ptrL, lenL := n.Left.slicePtrLen()
n.Right = cheapexpr(n.Right, init)
ptrR, lenR := n.Right.slicePtrLen()
fn = substArgTypes(fn, ptrL.Type.Elem(), ptrR.Type.Elem())
return mkcall1(fn, n.Type, init, ptrL, lenL, ptrR, lenR, nodintconst(n.Left.Type.Elem().Width))
}
@ -3156,8 +3159,7 @@ func eqfor(t *types.Type) (n *Node, needsize bool) {
case ASPECIAL:
sym := typesymprefix(".eq", t)
n := newname(sym)
n.SetClass(PFUNC)
n.Sym.SetFunc(true)
setNodeNameFunc(n)
n.Type = functype(nil, []*Node{
anonfield(types.NewPtr(t)),
anonfield(types.NewPtr(t)),
@ -3848,6 +3850,14 @@ func candiscard(n *Node) bool {
// builtin(a1, a2, a3)
// }(x, y, z)
// for print, println, and delete.
//
// Rewrite
// go f(x, y, uintptr(unsafe.Pointer(z)))
// into
// go func(a1, a2, a3) {
// f(a1, a2, uintptr(a3))
// }(x, y, unsafe.Pointer(z))
// for calls with unsafe-uintptr arguments.
var wrapCall_prgen int
@ -3859,9 +3869,17 @@ func wrapCall(n *Node, init *Nodes) *Node {
init.AppendNodes(&n.Ninit)
}
isBuiltinCall := n.Op != OCALLFUNC && n.Op != OCALLMETH && n.Op != OCALLINTER
// origArgs records which arguments are unsafe.Pointer-to-uintptr conversions, so the conversion can be reapplied inside the wrapper.
origArgs := make([]*Node, n.List.Len())
t := nod(OTFUNC, nil, nil)
for i, arg := range n.List.Slice() {
s := lookupN("a", i)
if !isBuiltinCall && arg.Op == OCONVNOP && arg.Type.IsUintptr() && arg.Left.Type.IsUnsafePtr() {
origArgs[i] = arg
arg = arg.Left
n.List.SetIndex(i, arg)
}
t.List.Append(symfield(s, arg.Type))
}
@ -3869,10 +3887,23 @@ func wrapCall(n *Node, init *Nodes) *Node {
sym := lookupN("wrap·", wrapCall_prgen)
fn := dclfunc(sym, t)
a := nod(n.Op, nil, nil)
a.List.Set(paramNnames(t.Type))
a = typecheck(a, ctxStmt)
fn.Nbody.Set1(a)
args := paramNnames(t.Type)
for i, origArg := range origArgs {
if origArg == nil {
continue
}
arg := nod(origArg.Op, args[i], nil)
arg.Type = origArg.Type
args[i] = arg
}
call := nod(n.Op, nil, nil)
if !isBuiltinCall {
call.Op = OCALL
call.Left = n.Left
call.SetIsDDD(n.IsDDD())
}
call.List.Set(args)
fn.Nbody.Set1(call)
funcbody()
@ -3880,12 +3911,12 @@ func wrapCall(n *Node, init *Nodes) *Node {
typecheckslice(fn.Nbody.Slice(), ctxStmt)
xtop = append(xtop, fn)
a = nod(OCALL, nil, nil)
a.Left = fn.Func.Nname
a.List.Set(n.List.Slice())
a = typecheck(a, ctxStmt)
a = walkexpr(a, init)
return a
call = nod(OCALL, nil, nil)
call.Left = fn.Func.Nname
call.List.Set(n.List.Slice())
call = typecheck(call, ctxStmt)
call = walkexpr(call, init)
return call
}
// substArgTypes substitutes the given list of types for
@ -3993,10 +4024,14 @@ func walkCheckPtrArithmetic(n *Node, init *Nodes) *Node {
case OADD:
walk(n.Left)
walk(n.Right)
case OSUB, OANDNOT:
case OSUB:
walk(n.Left)
case OAND:
if n.Implicit() { // was OANDNOT
walk(n.Left)
}
case OCONVNOP:
if n.Left.Type.Etype == TUNSAFEPTR {
if n.Left.Type.IsUnsafePtr() {
n.Left = cheapexpr(n.Left, init)
originals = append(originals, convnop(n.Left, types.Types[TUNSAFEPTR]))
}


@ -565,6 +565,42 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p = s.Prog(obj.ANOP)
gc.Patch(pbover, p)
case ssa.OpPPC64CLRLSLWI:
r := v.Reg()
r1 := v.Args[0].Reg()
shifts := v.AuxInt
p := s.Prog(v.Op.Asm())
// clrlslwi ra,rs,mb,sh will become rlwinm ra,rs,sh,mb-sh,31-sh as described in ISA
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)})
p.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpPPC64CLRLSLDI:
r := v.Reg()
r1 := v.Args[0].Reg()
shifts := v.AuxInt
p := s.Prog(v.Op.Asm())
// clrlsldi ra,rs,mb,sh will become rldic ra,rs,sh,mb-sh
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)})
p.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
// Mask has been set as sh
case ssa.OpPPC64RLDICL:
r := v.Reg()
r1 := v.Args[0].Reg()
shifts := v.AuxInt
p := s.Prog(v.Op.Asm())
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)})
p.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
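
A self-contained check of the ISA identity behind the 32-bit case, assuming sh <= mb <= 31 and the ISA's big-endian bit numbering (bit 0 is the most significant):

// clrlslwi: clear the mb leftmost bits, then shift left by sh.
func clrlslwi(rs uint32, mb, sh uint) uint32 {
	return (rs & (0xFFFFFFFF >> mb)) << sh
}

// rlwinm: rotate left by sh, then keep bits mb..me (big-endian numbering).
func rlwinm(rs uint32, sh, mb, me uint) uint32 {
	rot := rs<<sh | rs>>(32-sh)
	var mask uint32
	for i := mb; i <= me; i++ {
		mask |= 1 << (31 - i)
	}
	return rot & mask
}

// For all rs: clrlslwi(rs, mb, sh) == rlwinm(rs, sh, mb-sh, 31-sh).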
case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS,
ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU,
ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW,
@ -601,6 +637,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpPPC64MADDLD:
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
r3 := v.Args[2].Reg()
// r = r1*r2 + r3
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.Reg = r2
p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r3})
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpPPC64FMADD, ssa.OpPPC64FMADDS, ssa.OpPPC64FMSUB, ssa.OpPPC64FMSUBS:
r := v.Reg()
r1 := v.Args[0].Reg()
@ -615,23 +665,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpPPC64MaskIfNotCarry:
r := v.Reg()
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGZERO
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpPPC64ADDconstForCarry:
r1 := v.Args[0].Reg()
p := s.Prog(v.Op.Asm())
p.Reg = r1
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP // Ignored; this is for the carry effect.
case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FFLOOR, ssa.OpPPC64FTRUNC, ssa.OpPPC64FCEIL,
ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FCFIDS, ssa.OpPPC64FRSP, ssa.OpPPC64CNTLZD, ssa.OpPPC64CNTLZW,
ssa.OpPPC64POPCNTD, ssa.OpPPC64POPCNTW, ssa.OpPPC64POPCNTB, ssa.OpPPC64MFVSRD, ssa.OpPPC64MTVSRD, ssa.OpPPC64FABS, ssa.OpPPC64FNABS,
@ -652,6 +685,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpPPC64SUBFCconst:
p := s.Prog(v.Op.Asm())
p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt})
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpPPC64ANDCCconst:
p := s.Prog(v.Op.Asm())
p.Reg = v.Args[0].Reg()
@ -1788,7 +1829,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("Pseudo-op should not make it to codegen: %s ###\n", v.LongString())
case ssa.OpPPC64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT, ssa.OpPPC64FlagCarrySet, ssa.OpPPC64FlagCarryClear:
case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.OpClobber:
// TODO: implement for clobberdead experiment. Nop is ok for now.


@ -338,8 +338,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
n.To.Reg = dividend
}
j.To.Val = n
j2.To.Val = s.Pc()
j.To.SetTarget(n)
j2.To.SetTarget(s.Pc())
}
case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)


@ -7,12 +7,14 @@ package ssa
// addressingModes combines address calculations into memory operations
// that can perform complicated addressing modes.
func addressingModes(f *Func) {
isInImmediateRange := is32Bit
switch f.Config.arch {
default:
// Most architectures can't do this.
return
case "amd64", "386":
// TODO: s390x?
case "s390x":
isInImmediateRange = is20Bit
}
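
Plausible shapes of the two predicates (the real helpers are defined elsewhere in this package): amd64/386 displacements must fit a signed 32-bit immediate, while s390x indexed addressing takes a signed 20-bit displacement.

func is32Bit(n int64) bool { return n == int64(int32(n)) }

func is20Bit(n int64) bool { return -(1 << 19) <= n && n < 1<<19 }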
var tmp []*Value
@ -40,7 +42,7 @@ func addressingModes(f *Func) {
switch [2]auxType{opcodeTable[v.Op].auxType, opcodeTable[p.Op].auxType} {
case [2]auxType{auxSymOff, auxInt32}:
// TODO: introduce auxSymOff32
if !is32Bit(v.AuxInt + p.AuxInt) {
if !isInImmediateRange(v.AuxInt + p.AuxInt) {
continue
}
v.AuxInt += p.AuxInt
@ -48,7 +50,7 @@ func addressingModes(f *Func) {
if v.Aux != nil && p.Aux != nil {
continue
}
if !is32Bit(v.AuxInt + p.AuxInt) {
if !isInImmediateRange(v.AuxInt + p.AuxInt) {
continue
}
if p.Aux != nil {
@ -321,6 +323,23 @@ var combine = map[[2]Op]Op{
[2]Op{OpAMD64XORQconstmodify, OpAMD64LEAQ1}: OpAMD64XORQconstmodifyidx1,
[2]Op{OpAMD64XORQconstmodify, OpAMD64LEAQ8}: OpAMD64XORQconstmodifyidx8,
[2]Op{OpAMD64ADDSSload, OpAMD64LEAQ1}: OpAMD64ADDSSloadidx1,
[2]Op{OpAMD64ADDSSload, OpAMD64LEAQ4}: OpAMD64ADDSSloadidx4,
[2]Op{OpAMD64ADDSDload, OpAMD64LEAQ1}: OpAMD64ADDSDloadidx1,
[2]Op{OpAMD64ADDSDload, OpAMD64LEAQ8}: OpAMD64ADDSDloadidx8,
[2]Op{OpAMD64SUBSSload, OpAMD64LEAQ1}: OpAMD64SUBSSloadidx1,
[2]Op{OpAMD64SUBSSload, OpAMD64LEAQ4}: OpAMD64SUBSSloadidx4,
[2]Op{OpAMD64SUBSDload, OpAMD64LEAQ1}: OpAMD64SUBSDloadidx1,
[2]Op{OpAMD64SUBSDload, OpAMD64LEAQ8}: OpAMD64SUBSDloadidx8,
[2]Op{OpAMD64MULSSload, OpAMD64LEAQ1}: OpAMD64MULSSloadidx1,
[2]Op{OpAMD64MULSSload, OpAMD64LEAQ4}: OpAMD64MULSSloadidx4,
[2]Op{OpAMD64MULSDload, OpAMD64LEAQ1}: OpAMD64MULSDloadidx1,
[2]Op{OpAMD64MULSDload, OpAMD64LEAQ8}: OpAMD64MULSDloadidx8,
[2]Op{OpAMD64DIVSSload, OpAMD64LEAQ1}: OpAMD64DIVSSloadidx1,
[2]Op{OpAMD64DIVSSload, OpAMD64LEAQ4}: OpAMD64DIVSSloadidx4,
[2]Op{OpAMD64DIVSDload, OpAMD64LEAQ1}: OpAMD64DIVSDloadidx1,
[2]Op{OpAMD64DIVSDload, OpAMD64LEAQ8}: OpAMD64DIVSDloadidx8,
// 386
[2]Op{Op386MOVBload, Op386ADDL}: Op386MOVBloadidx1,
[2]Op{Op386MOVWload, Op386ADDL}: Op386MOVWloadidx1,
@ -381,4 +400,61 @@ var combine = map[[2]Op]Op{
[2]Op{Op386ANDLconstmodify, Op386LEAL4}: Op386ANDLconstmodifyidx4,
[2]Op{Op386ORLconstmodify, Op386LEAL4}: Op386ORLconstmodifyidx4,
[2]Op{Op386XORLconstmodify, Op386LEAL4}: Op386XORLconstmodifyidx4,
// s390x
[2]Op{OpS390XMOVDload, OpS390XADD}: OpS390XMOVDloadidx,
[2]Op{OpS390XMOVWload, OpS390XADD}: OpS390XMOVWloadidx,
[2]Op{OpS390XMOVHload, OpS390XADD}: OpS390XMOVHloadidx,
[2]Op{OpS390XMOVBload, OpS390XADD}: OpS390XMOVBloadidx,
[2]Op{OpS390XMOVWZload, OpS390XADD}: OpS390XMOVWZloadidx,
[2]Op{OpS390XMOVHZload, OpS390XADD}: OpS390XMOVHZloadidx,
[2]Op{OpS390XMOVBZload, OpS390XADD}: OpS390XMOVBZloadidx,
[2]Op{OpS390XMOVDBRload, OpS390XADD}: OpS390XMOVDBRloadidx,
[2]Op{OpS390XMOVWBRload, OpS390XADD}: OpS390XMOVWBRloadidx,
[2]Op{OpS390XMOVHBRload, OpS390XADD}: OpS390XMOVHBRloadidx,
[2]Op{OpS390XFMOVDload, OpS390XADD}: OpS390XFMOVDloadidx,
[2]Op{OpS390XFMOVSload, OpS390XADD}: OpS390XFMOVSloadidx,
[2]Op{OpS390XMOVDstore, OpS390XADD}: OpS390XMOVDstoreidx,
[2]Op{OpS390XMOVWstore, OpS390XADD}: OpS390XMOVWstoreidx,
[2]Op{OpS390XMOVHstore, OpS390XADD}: OpS390XMOVHstoreidx,
[2]Op{OpS390XMOVBstore, OpS390XADD}: OpS390XMOVBstoreidx,
[2]Op{OpS390XMOVDBRstore, OpS390XADD}: OpS390XMOVDBRstoreidx,
[2]Op{OpS390XMOVWBRstore, OpS390XADD}: OpS390XMOVWBRstoreidx,
[2]Op{OpS390XMOVHBRstore, OpS390XADD}: OpS390XMOVHBRstoreidx,
[2]Op{OpS390XFMOVDstore, OpS390XADD}: OpS390XFMOVDstoreidx,
[2]Op{OpS390XFMOVSstore, OpS390XADD}: OpS390XFMOVSstoreidx,
[2]Op{OpS390XMOVDload, OpS390XMOVDaddridx}: OpS390XMOVDloadidx,
[2]Op{OpS390XMOVWload, OpS390XMOVDaddridx}: OpS390XMOVWloadidx,
[2]Op{OpS390XMOVHload, OpS390XMOVDaddridx}: OpS390XMOVHloadidx,
[2]Op{OpS390XMOVBload, OpS390XMOVDaddridx}: OpS390XMOVBloadidx,
[2]Op{OpS390XMOVWZload, OpS390XMOVDaddridx}: OpS390XMOVWZloadidx,
[2]Op{OpS390XMOVHZload, OpS390XMOVDaddridx}: OpS390XMOVHZloadidx,
[2]Op{OpS390XMOVBZload, OpS390XMOVDaddridx}: OpS390XMOVBZloadidx,
[2]Op{OpS390XMOVDBRload, OpS390XMOVDaddridx}: OpS390XMOVDBRloadidx,
[2]Op{OpS390XMOVWBRload, OpS390XMOVDaddridx}: OpS390XMOVWBRloadidx,
[2]Op{OpS390XMOVHBRload, OpS390XMOVDaddridx}: OpS390XMOVHBRloadidx,
[2]Op{OpS390XFMOVDload, OpS390XMOVDaddridx}: OpS390XFMOVDloadidx,
[2]Op{OpS390XFMOVSload, OpS390XMOVDaddridx}: OpS390XFMOVSloadidx,
[2]Op{OpS390XMOVDstore, OpS390XMOVDaddridx}: OpS390XMOVDstoreidx,
[2]Op{OpS390XMOVWstore, OpS390XMOVDaddridx}: OpS390XMOVWstoreidx,
[2]Op{OpS390XMOVHstore, OpS390XMOVDaddridx}: OpS390XMOVHstoreidx,
[2]Op{OpS390XMOVBstore, OpS390XMOVDaddridx}: OpS390XMOVBstoreidx,
[2]Op{OpS390XMOVDBRstore, OpS390XMOVDaddridx}: OpS390XMOVDBRstoreidx,
[2]Op{OpS390XMOVWBRstore, OpS390XMOVDaddridx}: OpS390XMOVWBRstoreidx,
[2]Op{OpS390XMOVHBRstore, OpS390XMOVDaddridx}: OpS390XMOVHBRstoreidx,
[2]Op{OpS390XFMOVDstore, OpS390XMOVDaddridx}: OpS390XFMOVDstoreidx,
[2]Op{OpS390XFMOVSstore, OpS390XMOVDaddridx}: OpS390XFMOVSstoreidx,
}
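
Schematically, each entry folds the address computation into the memory op that consumes it; for the s390x rows (SSA notation in comments, not runnable Go):

// Before:
//	a = ADD x y                       // or MOVDaddridx {sym} [off] x y
//	v = MOVDload [off] {sym} a mem
// After:
//	v = MOVDloadidx [off] {sym} x y mem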


@ -165,16 +165,28 @@ func checkFunc(f *Func) {
f.Fatalf("value %v has Aux type %T, want string", v, v.Aux)
}
canHaveAux = true
case auxCallOff:
canHaveAuxInt = true
fallthrough
case auxCall:
if ac, ok := v.Aux.(*AuxCall); ok {
if v.Op == OpStaticCall && ac.Fn == nil {
f.Fatalf("value %v has *AuxCall with nil Fn", v)
}
} else {
f.Fatalf("value %v has Aux type %T, want *AuxCall", v, v.Aux)
}
canHaveAux = true
case auxSym, auxTyp:
canHaveAux = true
case auxSymOff, auxSymValAndOff, auxTypSize:
canHaveAuxInt = true
canHaveAux = true
case auxCCop:
if _, ok := v.Aux.(Op); !ok {
f.Fatalf("bad type %T for CCop in %v", v.Aux, v)
if opcodeTable[Op(v.AuxInt)].name == "OpInvalid" {
f.Fatalf("value %v has an AuxInt value that is a valid opcode", v)
}
canHaveAux = true
canHaveAuxInt = true
case auxS390XCCMask:
if _, ok := v.Aux.(s390x.CCMask); !ok {
f.Fatalf("bad type %T for S390XCCMask in %v", v.Aux, v)
@ -257,6 +269,38 @@ func checkFunc(f *Func) {
f.Fatalf("bad %s type: want uintptr, have %s",
v.Op, v.Type.String())
}
case OpStringLen:
if v.Type != c.Types.Int {
f.Fatalf("bad %s type: want int, have %s",
v.Op, v.Type.String())
}
case OpLoad:
if !v.Args[1].Type.IsMemory() {
f.Fatalf("bad arg 1 type to %s: want mem, have %s",
v.Op, v.Args[1].Type.String())
}
case OpStore:
if !v.Type.IsMemory() {
f.Fatalf("bad %s type: want mem, have %s",
v.Op, v.Type.String())
}
if !v.Args[2].Type.IsMemory() {
f.Fatalf("bad arg 2 type to %s: want mem, have %s",
v.Op, v.Args[2].Type.String())
}
case OpCondSelect:
if !v.Args[2].Type.IsBoolean() {
f.Fatalf("bad arg 2 type to %s: want boolean, have %s",
v.Op, v.Args[2].Type.String())
}
case OpAddPtr:
if !v.Args[0].Type.IsPtrShaped() && v.Args[0].Type != c.Types.Uintptr {
f.Fatalf("bad arg 0 type to %s: want ptr, have %s", v.Op, v.Args[0].LongString())
}
if !v.Args[1].Type.IsInteger() {
f.Fatalf("bad arg 1 type to %s: want integer, have %s", v.Op, v.Args[1].LongString())
}
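// For example (editor's illustration, names assumed): (AddPtr <*T> p (Const64 [8]))
// is well-formed, while an AddPtr whose second argument is itself a pointer
// would fail the integer check above.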
}
// TODO: check for cycles in values

View file

@ -160,15 +160,12 @@ func Compile(f *Func) {
phaseName = ""
}
// TODO: should be a config field
var dumpFileSeq int
// dumpFile creates a file from the phase name and function name
// Dumping is done to files to avoid buffering huge strings before
// output.
func (f *Func) dumpFile(phaseName string) {
dumpFileSeq++
fname := fmt.Sprintf("%s_%02d__%s.dump", f.Name, dumpFileSeq, phaseName)
f.dumpFileSeq++
fname := fmt.Sprintf("%s_%02d__%s.dump", f.Name, int(f.dumpFileSeq), phaseName)
fname = strings.Replace(fname, " ", "_", -1)
fname = strings.Replace(fname, "/", "_", -1)
fname = strings.Replace(fname, ":", "_", -1)
@ -436,6 +433,7 @@ var passes = [...]pass{
{name: "early fuse", fn: fuseEarly},
{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
{name: "softfloat", fn: softfloat, required: true},
{name: "expand calls", fn:expandCalls, required: true},
{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
{name: "dead auto elim", fn: elimDeadAutosGeneric},
{name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain

View file

@ -173,6 +173,9 @@ type Frontend interface {
// SetWBPos indicates that a write barrier has been inserted
// in this function at position pos.
SetWBPos(pos src.XPos)
// MyImportPath provides the import name (roughly, the package) for the function being compiled.
MyImportPath() string
}
// interface used to hold a *gc.Node (a stack variable).
@ -245,7 +248,7 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
c.FPReg = framepointerRegARM64
c.LinkReg = linkRegARM64
c.hasGReg = true
c.noDuffDevice = objabi.GOOS == "darwin" // darwin linker cannot handle BR26 reloc with non-zero addend
c.noDuffDevice = objabi.GOOS == "darwin" || objabi.GOOS == "ios" // darwin linker cannot handle BR26 reloc with non-zero addend
case "ppc64":
c.BigEndian = true
fallthrough

View file

@ -1,6 +1,7 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
import (

View file

@ -23,9 +23,11 @@ func decomposeBuiltIn(f *Func) {
}
// Decompose other values
applyRewrite(f, rewriteBlockdec, rewriteValuedec)
// Note: we pass leaveDeadValues because we need to keep the original
// values around so the name component resolution below can still work.
applyRewrite(f, rewriteBlockdec, rewriteValuedec, leaveDeadValues)
if f.Config.RegSize == 4 {
applyRewrite(f, rewriteBlockdec64, rewriteValuedec64)
applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, leaveDeadValues)
}
// Split up named values into their components.
@ -139,7 +141,7 @@ func decomposeStringPhi(v *Value) {
func decomposeSlicePhi(v *Value) {
types := &v.Block.Func.Config.Types
ptrType := types.BytePtr
ptrType := v.Type.Elem().PtrTo()
lenType := types.Int
ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
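// Using the slice's own element pointer type (e.g. *int for []int) instead of
// the generic BytePtr keeps the decomposed phi's pointer component correctly typed.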
@ -215,7 +217,7 @@ func decomposeInterfacePhi(v *Value) {
}
func decomposeArgs(f *Func) {
applyRewrite(f, rewriteBlockdecArgs, rewriteValuedecArgs)
applyRewrite(f, rewriteBlockdecArgs, rewriteValuedecArgs, removeDeadValues)
}
func decomposeUser(f *Func) {

View file

@ -0,0 +1,100 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
import "cmd/compile/internal/types"
// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
// that is more oriented to a platform's ABI. The SelectN operations that extract results are also rewritten into
// more appropriate forms.
func expandCalls(f *Func) {
canSSAType := f.fe.CanSSA
sp, _ := f.spSb()
// Calls that need lowering have some number of inputs, including a memory input,
// and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
// With the current ABI those inputs need to be converted into stores to memory:
// the first store consumes the call's original memory input, and the rewritten call receives the last store's memory output.
// With the current ABI, the outputs need to be converted to loads, which will all use the call's
// memory output as their input.
// Step 1: find all references to calls as values and rewrite those.
for _, b := range f.Blocks {
for _, v := range b.Values {
switch v.Op {
case OpSelectN:
call := v.Args[0]
aux := call.Aux.(*AuxCall)
which := v.AuxInt
t := v.Type
if which == aux.NResults() { // mem is after the results.
// rewrite v as a Copy of call -- the replacement call will produce a mem.
v.copyOf(call)
} else {
pt := types.NewPtr(t)
if canSSAType(t) {
off := f.ConstOffPtrSP(pt, aux.OffsetOfResult(which), sp)
v.reset(OpLoad)
v.SetArgs2(off, call)
} else {
panic("Should not have non-SSA-able OpSelectN")
}
}
v.Type = t // not right for the mem operand yet, but will be when call is rewritten.
case OpSelectNAddr:
call := v.Args[0]
which := v.AuxInt
aux := call.Aux.(*AuxCall)
pt := v.Type
off := f.ConstOffPtrSP(pt, aux.OffsetOfResult(which), sp)
v.copyOf(off)
}
}
}
// Step 2: rewrite the calls
for _, b := range f.Blocks {
for _, v := range b.Values {
switch v.Op {
case OpStaticLECall:
// Thread the stores on the memory arg
m0 := v.Args[len(v.Args)-1]
mem := m0
pos := v.Pos.WithNotStmt()
aux := v.Aux.(*AuxCall)
auxInt := v.AuxInt
for i, a := range v.Args {
if a == m0 {
break
}
if a.Op == OpDereference {
// "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
src := a.Args[0]
dst := f.ConstOffPtrSP(src.Type, aux.OffsetOfArg(int64(i)), sp)
a.reset(OpMove)
a.Pos = pos
a.Type = types.TypeMem
a.Aux = aux.TypeOfArg(int64(i))
a.AuxInt = aux.SizeOfArg(int64(i))
a.SetArgs3(dst, src, mem)
mem = a
} else {
// Add a new store.
t := aux.TypeOfArg(int64(i))
dst := f.ConstOffPtrSP(types.NewPtr(t), aux.OffsetOfArg(int64(i)), sp)
mem = b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, a, mem)
}
}
v.reset(OpStaticCall)
v.Type = types.TypeMem
v.Aux = aux
v.AuxInt = auxInt
v.SetArgs1(mem)
}
}
}
}
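// Editor's sketch (hypothetical SSA values, names assumed) of the net effect
// for a call f(a) with one result:
//
//	before: c = StaticLECall {f} a mem
//	        r = SelectN [0] c        // the result
//	        m = SelectN [1] c        // index == NResults(), i.e. the memory
//
//	after:  s = Store {t} (OffPtr <*t> [argOff] sp) a mem
//	        c = StaticCall {f} s     // now produces only memory
//	        r = Load (OffPtr <*t> [resultOff] sp) c
//	        m = c                    // the SelectN of mem becomes a copy of c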

View file

@ -146,6 +146,10 @@ func (d DummyFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t
func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
func (d DummyFrontend) Debug_checknil() bool { return false }
func (d DummyFrontend) MyImportPath() string {
return "my/import/path"
}
var dummyTypes Types
func init() {

View file

@ -33,13 +33,6 @@ type Func struct {
Blocks []*Block // unordered set of all basic blocks (note: not indexable by ID)
Entry *Block // the entry basic block
// If we are using open-coded defers, this is the first call to a deferred
// function in the final defer exit sequence that we generated. This call
// should be after all defer statements, and will have all args, etc. of
// all defer calls as live. The liveness info of this call will be used
// for the deferreturn/ret segment generated for functions with open-coded
// defers.
LastDeferExit *Value
bid idAlloc // block ID allocator
vid idAlloc // value ID allocator
@ -54,6 +47,7 @@ type Func struct {
scheduled bool // Values in Blocks are in final order
laidout bool // Blocks are ordered
NoSplit bool // true if function is marked as nosplit. Used by schedule check pass.
dumpFileSeq uint8 // the sequence number of the dump file ("%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName)
// when register allocation is done, maps value ids to locations
RegAlloc []Location
@ -263,6 +257,49 @@ func (f *Func) LogStat(key string, args ...interface{}) {
f.Warnl(f.Entry.Pos, "\t%s\t%s%s\t%s", n, key, value, f.Name)
}
// unCacheLine removes v from f's constant cache "line" for aux,
// resets v.InCache when it is found (and removed),
// and returns whether v was found in that line.
func (f *Func) unCacheLine(v *Value, aux int64) bool {
vv := f.constants[aux]
for i, cv := range vv {
if v == cv {
vv[i] = vv[len(vv)-1]
vv[len(vv)-1] = nil
f.constants[aux] = vv[0 : len(vv)-1]
v.InCache = false
return true
}
}
return false
}
// unCache removes v from f's constant cache.
func (f *Func) unCache(v *Value) {
if v.InCache {
aux := v.AuxInt
if f.unCacheLine(v, aux) {
return
}
if aux == 0 {
switch v.Op {
case OpConstNil:
aux = constNilMagic
case OpConstSlice:
aux = constSliceMagic
case OpConstString:
aux = constEmptyStringMagic
case OpConstInterface:
aux = constInterfaceMagic
}
if aux != 0 && f.unCacheLine(v, aux) {
return
}
}
f.Fatalf("unCached value %s not found in cache, auxInt=0x%x, adjusted aux=0x%x", v.LongString(), v.AuxInt, aux)
}
}
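// For intuition: constVal caches zero-argument constants in lines keyed by
// AuxInt, so the aux-less ops (ConstNil, ConstSlice, ConstString,
// ConstInterface), whose AuxInt is always 0, need the synthetic magic keys
// above to occupy cache lines distinct from a genuine integer constant 0.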
// freeValue frees a value. It must no longer be referenced or have any args.
func (f *Func) freeValue(v *Value) {
if v.Block == nil {
@ -276,19 +313,8 @@ func (f *Func) freeValue(v *Value) {
}
// Clear everything but ID (which we reuse).
id := v.ID
// Values with zero arguments and OpOffPtr values might be cached, so remove them there.
nArgs := opcodeTable[v.Op].argLen
if nArgs == 0 || v.Op == OpOffPtr {
vv := f.constants[v.AuxInt]
for i, cv := range vv {
if v == cv {
vv[i] = vv[len(vv)-1]
vv[len(vv)-1] = nil
f.constants[v.AuxInt] = vv[0 : len(vv)-1]
break
}
}
if v.InCache {
f.unCache(v)
}
*v = Value{}
v.ID = id
@ -554,6 +580,7 @@ func (f *Func) constVal(op Op, t *types.Type, c int64, setAuxInt bool) *Value {
v = f.Entry.NewValue0(src.NoXPos, op, t)
}
f.constants[c] = append(vv, v)
v.InCache = true
return v
}
@ -684,7 +711,8 @@ func (f *Func) invalidateCFG() {
// GSHS_LOGFILE
// or standard out if that is empty or there is an error
// opening the file.
func (f *Func) DebugHashMatch(evname, name string) bool {
func (f *Func) DebugHashMatch(evname string) bool {
name := f.fe.MyImportPath() + "." + f.Name
evhash := os.Getenv(evname)
switch evhash {
case "":
@ -733,7 +761,7 @@ func (f *Func) logDebugHashMatch(evname, name string) {
file = os.Stdout
if tmpfile := os.Getenv("GSHS_LOGFILE"); tmpfile != "" {
var err error
file, err = os.Create(tmpfile)
file, err = os.OpenFile(tmpfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
f.Fatalf("could not open hash-testing logfile %s", tmpfile)
}
@ -747,3 +775,25 @@ func (f *Func) logDebugHashMatch(evname, name string) {
func DebugNameMatch(evname, name string) bool {
return os.Getenv(evname) == name
}
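// Hypothetical usage (not part of this change): a pass can gate a suspect
// rewrite on the hash of the import-path-qualified function name, so a
// regression can be bisected by narrowing the variable's binary suffix:
//
//	if f.DebugHashMatch("GOSSAHASH") {
//		// apply the experimental optimization to this function only
//	}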
func (f *Func) spSb() (sp, sb *Value) {
initpos := f.Entry.Pos
for _, v := range f.Entry.Values {
if v.Op == OpSB {
sb = v
}
if v.Op == OpSP {
sp = v
}
if sb != nil && sp != nil {
break
}
}
if sb == nil {
sb = f.Entry.NewValue0(initpos, OpSB, f.Config.Types.Uintptr)
}
if sp == nil {
sp = f.Entry.NewValue0(initpos, OpSP, f.Config.Types.Uintptr)
}
return
}

View file

@ -38,6 +38,7 @@ package ssa
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
"reflect"
@ -140,6 +141,12 @@ var emptyPass pass = pass{
name: "empty pass",
}
// AuxCallLSym returns an AuxCall initialized with an LSym that should pass "check"
// as the Aux of a static call.
func AuxCallLSym(name string) *AuxCall {
return &AuxCall{Fn: &obj.LSym{}}
}
// Fun takes the name of an entry bloc and a series of Bloc calls, and
// returns a fun containing the composed Func. entry must be a name
// supplied to one of the Bloc functions. Each of the bloc names and

View file

@ -142,10 +142,10 @@ func TestFuseSideEffects(t *testing.T) {
Valu("b", OpArg, c.config.Types.Bool, 0, nil),
If("b", "then", "else")),
Bloc("then",
Valu("call1", OpStaticCall, types.TypeMem, 0, nil, "mem"),
Valu("call1", OpStaticCall, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
Goto("empty")),
Bloc("else",
Valu("call2", OpStaticCall, types.TypeMem, 0, nil, "mem"),
Valu("call2", OpStaticCall, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
Goto("empty")),
Bloc("empty",
Goto("loop")),

View file

@ -463,9 +463,9 @@ func init() {
faultOnNilArg0: true,
},
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
// arg0 = destination pointer
// arg1 = source pointer

File diff suppressed because it is too large.

View file

@ -153,6 +153,7 @@ func init() {
fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly}
fp21load = regInfo{inputs: []regMask{fp, gpspsb, 0}, outputs: fponly}
fp21loadidx = regInfo{inputs: []regMask{fp, gpspsb, gpspsb, 0}, outputs: fponly}
fpgp = regInfo{inputs: fponly, outputs: gponly}
gpfp = regInfo{inputs: gponly, outputs: fponly}
fp11 = regInfo{inputs: fponly, outputs: fponly}
@ -201,6 +202,23 @@ func init() {
{name: "DIVSSload", argLength: 3, reg: fp21load, asm: "DIVSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
{name: "DIVSDload", argLength: 3, reg: fp21load, asm: "DIVSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
{name: "ADDSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "ADDSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
{name: "ADDSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "ADDSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
{name: "ADDSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "ADDSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
{name: "ADDSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "ADDSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
{name: "SUBSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "SUBSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
{name: "SUBSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "SUBSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
{name: "SUBSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "SUBSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
{name: "SUBSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "SUBSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
{name: "MULSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "MULSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
{name: "MULSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "MULSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
{name: "MULSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "MULSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
{name: "MULSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "MULSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
{name: "DIVSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "DIVSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
{name: "DIVSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "DIVSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
{name: "DIVSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "DIVSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
{name: "DIVSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "DIVSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
// binary ops
{name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1
{name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
@ -730,6 +748,7 @@ func init() {
clobbers: buildReg("DI"),
},
faultOnNilArg0: true,
unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
},
{name: "MOVOconst", reg: regInfo{nil, 0, []regMask{fp}}, typ: "Int128", aux: "Int128", rematerializeable: true},
@ -748,9 +767,9 @@ func init() {
faultOnNilArg0: true,
},
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
// arg0 = destination pointer
// arg1 = source pointer
@ -768,6 +787,7 @@ func init() {
clobberFlags: true,
faultOnNilArg0: true,
faultOnNilArg1: true,
unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
},
// arg0 = destination pointer

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View file

@ -467,13 +467,13 @@ func init() {
// conditional instructions; auxint is
// one of the arm64 comparison pseudo-ops (LessThan, LessThanU, etc.)
{name: "CSEL", argLength: 3, reg: gp2flags1, asm: "CSEL", aux: "CCop"}, // aux(flags) ? arg0 : arg1
{name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // aux(flags) ? arg0 : 0
{name: "CSEL", argLength: 3, reg: gp2flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : arg1
{name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : 0
// function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
// pseudo-ops
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
@ -507,6 +507,7 @@ func init() {
clobbers: buildReg("R20 R30"),
},
faultOnNilArg0: true,
unsafePoint: true, // FP maintenance around DUFFZERO can be clobbered by interrupts
},
// large zeroing
@ -547,6 +548,7 @@ func init() {
},
faultOnNilArg0: true,
faultOnNilArg1: true,
unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
},
// large move

View file

@ -428,9 +428,9 @@ func init() {
{name: "SRAcond", argLength: 3, reg: gp2flags1, asm: "SRA"}, // arg0 >> 31 if flags indicates HS, arg0 >> arg1 otherwise, signed shift, arg2=flags
// function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
// pseudo-ops
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.

View file

@ -11,8 +11,8 @@
(Mul(64|32|16|8) x y) => (Select1 (MULVU x y))
(Mul(32|64)F ...) => (MUL(F|D) ...)
(Mul64uhilo ...) => (MULVU ...)
(Select0 (Mul64uover x y)) -> (Select1 <typ.UInt64> (MULVU x y))
(Select1 (Mul64uover x y)) -> (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
(Select0 (Mul64uover x y)) => (Select1 <typ.UInt64> (MULVU x y))
(Select1 (Mul64uover x y)) => (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
(Hmul64 x y) => (Select0 (MULV x y))
(Hmul64u x y) => (Select0 (MULVU x y))
@ -38,8 +38,8 @@
(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
// (x + y) / 2 with x>=y -> (x - y) / 2 + y
(Avg64u <t> x y) -> (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
// (x + y) / 2 with x>=y => (x - y) / 2 + y
(Avg64u <t> x y) => (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
@ -130,10 +130,10 @@
(Not x) => (XORconst [1] x)
// constants
(Const(64|32|16|8) ...) -> (MOVVconst ...)
(Const(32|64)F ...) -> (MOV(F|D)const ...)
(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
(ConstNil) => (MOVVconst [0])
(ConstBool ...) -> (MOVVconst ...)
(ConstBool [b]) => (MOVVconst [int64(b2i(b))])
(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])
@ -161,7 +161,7 @@
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)
// float <-> int conversion
// float <=> int conversion
(Cvt32to32F ...) => (MOVWF ...)
(Cvt32to64F ...) => (MOVWD ...)
(Cvt64to32F ...) => (MOVVF ...)
@ -214,11 +214,11 @@
(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))
(OffPtr [off] ptr:(SP)) -> (MOVVaddr [off] ptr)
(OffPtr [off] ptr) -> (ADDVconst [off] ptr)
(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVVaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDVconst [off] ptr)
(Addr ...) -> (MOVVaddr ...)
(LocalAddr {sym} base _) -> (MOVVaddr {sym} base)
(Addr {sym} base) => (MOVVaddr {sym} base)
(LocalAddr {sym} base _) => (MOVVaddr {sym} base)
// loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
@ -380,24 +380,17 @@
(InterCall ...) => (CALLinter ...)
// atomic intrinsics
(AtomicLoad8 ...) -> (LoweredAtomicLoad8 ...)
(AtomicLoad32 ...) -> (LoweredAtomicLoad32 ...)
(AtomicLoad64 ...) -> (LoweredAtomicLoad64 ...)
(AtomicLoadPtr ...) -> (LoweredAtomicLoad64 ...)
(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)
(AtomicStore8 ...) -> (LoweredAtomicStore8 ...)
(AtomicStore32 ...) -> (LoweredAtomicStore32 ...)
(AtomicStore64 ...) -> (LoweredAtomicStore64 ...)
(AtomicStorePtrNoWB ...) -> (LoweredAtomicStore64 ...)
(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...)
(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)
(AtomicExchange32 ...) -> (LoweredAtomicExchange32 ...)
(AtomicExchange64 ...) -> (LoweredAtomicExchange64 ...)
(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
(AtomicAdd32 ...) -> (LoweredAtomicAdd32 ...)
(AtomicAdd64 ...) -> (LoweredAtomicAdd64 ...)
(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
(AtomicCompareAndSwap32 ...) -> (LoweredAtomicCas32 ...)
(AtomicCompareAndSwap64 ...) -> (LoweredAtomicCas64 ...)
(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...)
// checks
(NilCheck ...) => (LoweredNilCheck ...)
@ -444,69 +437,69 @@
(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
// fold offset into address
(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) -> (MOVVaddr [off1+off2] {sym} ptr)
(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)
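// e.g. with off1=4 and off2=8 the sum fits in 32 bits, so
// (ADDVconst [4] (MOVVaddr [8] {sym} ptr)) folds to (MOVVaddr [12] {sym} ptr).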
// fold address into load/store
(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem)
(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBUload [off1+off2] {sym} ptr mem)
(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHload [off1+off2] {sym} ptr mem)
(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHUload [off1+off2] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem)
(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWUload [off1+off2] {sym} ptr mem)
(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVVload [off1+off2] {sym} ptr mem)
(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVFload [off1+off2] {sym} ptr mem)
(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDload [off1+off2] {sym} ptr mem)
(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBload [off1+int32(off2)] {sym} ptr mem)
(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBUload [off1+int32(off2)] {sym} ptr mem)
(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} ptr mem)
(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHUload [off1+int32(off2)] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} ptr mem)
(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWUload [off1+int32(off2)] {sym} ptr mem)
(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVload [off1+int32(off2)] {sym} ptr mem)
(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVFload [off1+int32(off2)] {sym} ptr mem)
(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} ptr mem)
(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem)
(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem)
(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVVstore [off1+off2] {sym} ptr val mem)
(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVFstore [off1+off2] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} ptr val mem)
(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWstorezero [off1+off2] {sym} ptr mem)
(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVVstorezero [off1+off2] {sym} ptr mem)
(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVVload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWUload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVVload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVFload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVDload [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVVstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVVstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVVstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVFstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVDstore [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVBstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVHstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVWstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
(MOVVstorezero [off1+int32(off2)] {mergeSymTyped(sym1,sym2)} ptr mem)
// store zero
(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
@ -643,10 +636,9 @@
(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
(MOVVreg (MOVVconst [c])) => (MOVVconst [c])
(LoweredAtomicStore32 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero32 ptr mem)
(LoweredAtomicStore64 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero64 ptr mem)
(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst32 [c] ptr mem)
(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst64 [c] ptr mem)
(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem)
(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem)
(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem)
// constant comparisons
(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])

View file

@ -273,9 +273,9 @@ func init() {
{name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
// function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
// duffzero
// arg0 = address of memory to zero

View file

@ -255,9 +255,9 @@ func init() {
{name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
// function calls
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
// atomic ops

View file

@ -11,6 +11,9 @@
(Sub32F ...) => (FSUBS ...)
(Sub64F ...) => (FSUB ...)
// Combine 64 bit integer multiply and adds
(ADD l:(MULLD x y) z) && objabi.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z)
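// e.g. (editor's illustration) func f(a, b, c int64) int64 { return a*b + c }
// emits a single MADDLD on POWER9 and later, provided the multiply has no
// other uses.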
(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
@ -76,6 +79,23 @@
(Abs ...) => (FABS ...)
(FMA ...) => (FMADD ...)
// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to(16|32|64) ...) => (MOVBreg ...)
(SignExt16to(32|64) ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)
(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
(ZeroExt32to64 ...) => (MOVWZreg ...)
(Trunc(16|32|64)to8 <t> x) && isSigned(t) => (MOVBreg x)
(Trunc(16|32|64)to8 x) => (MOVBZreg x)
(Trunc(32|64)to16 <t> x) && isSigned(t) => (MOVHreg x)
(Trunc(32|64)to16 x) => (MOVHZreg x)
(Trunc64to32 <t> x) && isSigned(t) => (MOVWreg x)
(Trunc64to32 x) => (MOVWZreg x)
// Lowering constants
(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
(Const(32|64)F ...) => (FMOV(S|D)const ...)
@ -107,13 +127,21 @@
// Rotate generation with non-const shift
// these match patterns from math/bits/RotateLeft[32|64], but there could be others
(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
// Lowering rotates
(RotateLeft32 x y) => (ROTLW x y)
(RotateLeft64 x y) => (ROTL x y)
@ -189,11 +217,15 @@
(Rsh64Ux64 x (AND y (MOVDconst [63]))) => (SRD x (ANDconst <typ.Int64> [63] y))
(Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) => (SRD x (ANDconst <typ.UInt> [63] y))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))) => (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63])))) => (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (AND y (MOVDconst [63]))) => (SRAD x (ANDconst <typ.Int64> [63] y))
(Rsh64x64 x (ANDconst <typ.UInt> [63] y)) => (SRAD x (ANDconst <typ.UInt> [63] y))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))) => (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63])))) => (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
(Lsh64x64 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
(Rsh64x64 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
@ -205,12 +237,16 @@
(Rsh32Ux64 x (AND y (MOVDconst [31]))) => (SRW x (ANDconst <typ.Int32> [31] y))
(Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) => (SRW x (ANDconst <typ.UInt> [31] y))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))) => (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31])))) => (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (AND y (MOVDconst [31]))) => (SRAW x (ANDconst <typ.Int32> [31] y))
(Rsh32x64 x (ANDconst <typ.UInt> [31] y)) => (SRAW x (ANDconst <typ.UInt> [31] y))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))) => (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31])))) => (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
(Rsh32Ux64 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
@ -273,18 +309,11 @@
(Rsh8Ux8 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
(Lsh8x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
// Cleaning up shift ops when input is masked
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && int64(c) + d < 0 => (MOVDconst [-1])
// Cleaning up shift ops
(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c]))) && c >= d => (ANDconst [d] y)
(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y))) && c >= d => (ANDconst [d] y)
(ORN x (MOVDconst [-1])) => x
(ADDconstForCarry [c] (MOVDconst [d])) && c < 0 && d >= 0 && int64(c) + d < 0 => (FlagCarryClear)
(ADDconstForCarry [c] (MOVDconst [d])) && c < 0 && (d < 0 || int64(c) + d >= 0) => (FlagCarrySet)
(MaskIfNotCarry (FlagCarrySet)) => (MOVDconst [0])
(MaskIfNotCarry (FlagCarryClear)) => (MOVDconst [-1])
(S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x)
(S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x)
@ -303,8 +332,8 @@
(Ctz16 x) => (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
(Ctz8 x) => (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
(BitLen64 x) => (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x))
(BitLen32 x) => (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x))
(BitLen64 x) => (SUBFCconst [64] (CNTLZD <typ.Int> x))
(BitLen32 x) => (SUBFCconst [32] (CNTLZW <typ.Int> x))
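// SUBFCconst computes constant-minus-register in one instruction, so
// 64 - CNTLZD(x) no longer needs a separate MOVDconst to materialize the 64.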
(PopCount64 ...) => (POPCNTD ...)
(PopCount32 x) => (POPCNTW (MOVWZreg x))
@ -768,16 +797,40 @@
(MOVWreg y:(MOVWZreg x)) => (MOVWreg x)
(MOVWZreg y:(MOVWreg x)) => (MOVWZreg x)
// Truncate then logical then truncate: omit first, lesser or equal truncate
(MOVWZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVWZreg ((OR|XOR|AND) <t> x y))
(MOVHZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
(MOVHZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
(MOVBZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
(MOVBZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
(MOVBZreg ((OR|XOR|AND) <t> x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
(MOV(B|H|W)Zreg z:(ANDconst [c] (MOVBZload ptr x))) => z
(MOVBZreg z:(AND y (MOVBZload ptr x))) => z
(MOV(H|W)Zreg z:(ANDconst [c] (MOVHZload ptr x))) => z
(MOVHZreg z:(AND y (MOVHZload ptr x))) => z
(MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) => z
(MOVWZreg z:(AND y (MOVWZload ptr x))) => z
// Arithmetic constant ops
(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [c] x)
(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x)
(ADDconst [0] x) => x
(SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x)
// TODO deal with subtract-from-const
(ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x)
// Subtract from (with carry, but ignored) constant.
// Note, these clobber the carry bit.
(SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x)
(SUBFCconst [c] (NEG x)) => (ADDconst [c] x)
(SUBFCconst [c] (SUBFCconst [d] x)) && is32Bit(c-d) => (ADDconst [c-d] x)
(SUBFCconst [0] x) => (NEG x)
(ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x)
(NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x)
(NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x)
// Use register moves instead of stores and loads to move int<=>float values
// Common with math Float64bits, Float64frombits
(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) => (MFVSRD x)
@ -928,23 +981,6 @@
(AtomicAnd8 ...) => (LoweredAtomicAnd8 ...)
(AtomicOr8 ...) => (LoweredAtomicOr8 ...)
// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to(16|32|64) ...) => (MOVBreg ...)
(SignExt16to(32|64) ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)
(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
(ZeroExt32to64 ...) => (MOVWZreg ...)
(Trunc(16|32|64)to8 <t> x) && isSigned(t) => (MOVBreg x)
(Trunc(16|32|64)to8 x) => (MOVBZreg x)
(Trunc(32|64)to16 <t> x) && isSigned(t) => (MOVHreg x)
(Trunc(32|64)to16 x) => (MOVHZreg x)
(Trunc64to32 <t> x) && isSigned(t) => (MOVWreg x)
(Trunc64to32 x) => (MOVWZreg x)
(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
// Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
@ -975,6 +1011,20 @@
(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
// Implement clrsldi and clrslwi extended mnemonics as described in
// ISA 3.0 section C.8. AuxInt field contains values needed for
// the instructions, packed together since there is only one available.
(SLDconst [c] z:(MOVBZreg x)) && c < 8 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
(SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
(SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
(SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
(SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
(SLWconst [c] z:(MOVWZreg x)) && z.Uses == 1 && c < 24 => (CLRLSLWI [newPPC64ShiftAuxInt(c,8,31,32)] x)
(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
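// e.g. a zero-extended byte shifted left by a small constant, formerly two
// instructions, now becomes a single CLRLSLDI (clear left, then shift left).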
// Lose widening ops fed to stores
(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem)

Some files were not shown because too many files have changed in this diff.