Mirror of https://github.com/golang/go.git, synced 2025-12-08 06:10:04 +00:00.

commit 7c4fbb650c

[dev.ssa] Merge remote-tracking branch 'origin/master' into mergebranch

The only major fixup is that duffzero changed from 8-byte writes to 16-byte writes.

Change-Id: I1762b74ce67a8e4b81c11568027cdb3572f7f87c

545 changed files with 26683 additions and 12849 deletions
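The commit message's duffzero note refers to the runtime's unrolled zeroing routine. As a rough conceptual sketch only (the real duffzero is architecture-specific assembly emitted by the toolchain, not Go), the snippet below clears a buffer in 16-byte blocks with a scalar tail, which is the shape of change the message describes.

package main

import "fmt"

// zero16 clears buf in 16-byte blocks, then finishes the tail one byte at a
// time. It only illustrates the idea; it is not the real duffzero.
func zero16(buf []byte) {
	i := 0
	for ; i+16 <= len(buf); i += 16 {
		for j := 0; j < 16; j++ { // one 16-byte block per outer step
			buf[i+j] = 0
		}
	}
	for ; i < len(buf); i++ { // remaining bytes when len(buf) is not a multiple of 16
		buf[i] = 0
	}
}

func main() {
	b := []byte("some stale data to be cleared..........")
	zero16(b)
	fmt.Println(b)
}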
AUTHORS (61 lines changed)
CONTRIBUTORS (86 lines changed)
@@ -12,14 +12,12 @@ in your web browser.

Our canonical Git repository is located at https://go.googlesource.com/go.
There is a mirror of the repository at https://github.com/golang/go.

Please report issues here: https://golang.org/issue/new

Go is the work of hundreds of contributors. We appreciate your help!

To contribute, please read the contribution guidelines:
https://golang.org/doc/contribute.html

##### Please note that we do not use pull requests.
##### Note that we do not accept pull requests and that we use the issue tracker for bug reports and proposals only. Please ask questions on https://forum.golangbridge.org or https://groups.google.com/forum/#!forum/golang-nuts.

Unless otherwise noted, the Go source files are distributed
under the BSD-style license found in the LICENSE file.
api/next.txt (53 lines changed)

@@ -0,0 +1,53 @@
pkg bufio, method (*Scanner) Buffer([]uint8, int)
pkg bufio, var ErrFinalToken error
pkg debug/dwarf, const ClassUnknown = 0
pkg debug/dwarf, const ClassUnknown Class
pkg html/template, func IsTrue(interface{}) (bool, bool)
pkg image, func NewNYCbCrA(Rectangle, YCbCrSubsampleRatio) *NYCbCrA
pkg image, method (*NYCbCrA) AOffset(int, int) int
pkg image, method (*NYCbCrA) At(int, int) color.Color
pkg image, method (*NYCbCrA) Bounds() Rectangle
pkg image, method (*NYCbCrA) COffset(int, int) int
pkg image, method (*NYCbCrA) ColorModel() color.Model
pkg image, method (*NYCbCrA) NYCbCrAAt(int, int) color.NYCbCrA
pkg image, method (*NYCbCrA) Opaque() bool
pkg image, method (*NYCbCrA) SubImage(Rectangle) Image
pkg image, method (*NYCbCrA) YCbCrAt(int, int) color.YCbCr
pkg image, method (*NYCbCrA) YOffset(int, int) int
pkg image, type NYCbCrA struct
pkg image, type NYCbCrA struct, A []uint8
pkg image, type NYCbCrA struct, AStride int
pkg image, type NYCbCrA struct, embedded YCbCr
pkg image/color, method (NYCbCrA) RGBA() (uint32, uint32, uint32, uint32)
pkg image/color, type NYCbCrA struct
pkg image/color, type NYCbCrA struct, A uint8
pkg image/color, type NYCbCrA struct, embedded YCbCr
pkg image/color, var NYCbCrAModel Model
pkg math/big, method (*Float) MarshalText() ([]uint8, error)
pkg math/big, method (*Float) UnmarshalText([]uint8) error
pkg math/big, method (*Int) Append([]uint8, int) []uint8
pkg math/big, method (*Int) Text(int) string
pkg math/rand, func Read([]uint8) (int, error)
pkg math/rand, method (*Rand) Read([]uint8) (int, error)
pkg net, type DNSError struct, IsTemporary bool
pkg net/http, const StatusNetworkAuthenticationRequired = 511
pkg net/http, const StatusNetworkAuthenticationRequired ideal-int
pkg net/http, const StatusPreconditionRequired = 428
pkg net/http, const StatusPreconditionRequired ideal-int
pkg net/http, const StatusRequestHeaderFieldsTooLarge = 431
pkg net/http, const StatusRequestHeaderFieldsTooLarge ideal-int
pkg net/http, const StatusTooManyRequests = 429
pkg net/http, const StatusTooManyRequests ideal-int
pkg net/http/httptest, method (*ResponseRecorder) WriteString(string) (int, error)
pkg net/url, method (*Error) Temporary() bool
pkg net/url, method (*Error) Timeout() bool
pkg strconv, func AppendQuoteRuneToGraphic([]uint8, int32) []uint8
pkg strconv, func AppendQuoteToGraphic([]uint8, string) []uint8
pkg strconv, func IsGraphic(int32) bool
pkg strconv, func QuoteRuneToGraphic(int32) string
pkg strconv, func QuoteToGraphic(string) string
pkg text/template, func IsTrue(interface{}) (bool, bool)
pkg text/template, method (ExecError) Error() string
pkg text/template, type ExecError struct
pkg text/template, type ExecError struct, Err error
pkg text/template, type ExecError struct, Name string

@@ -577,7 +577,7 @@ might turn up:
<<<<<<< HEAD
if arg < 1e9 {
=======
if arg &lh; 1e10 {
if arg < 1e10 {
>>>>>>> mcgillicutty
largeReduce(arg)
</pre>
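Several of the api/next.txt additions above are easy to exercise directly. The following sketch assumes a Go 1.6 or newer toolchain and touches three of the listed entries: bufio.(*Scanner).Buffer, math/rand.Read, and strconv.QuoteToGraphic; the sample strings are made up.

package main

import (
	"bufio"
	"fmt"
	"math/rand"
	"strconv"
	"strings"
)

func main() {
	// bufio: Buffer lets a Scanner accept tokens larger than the default limit.
	sc := bufio.NewScanner(strings.NewReader("first line\nsecond, much longer line"))
	sc.Buffer(make([]byte, 0, 64*1024), 1024*1024) // initial buffer and maximum token size
	for sc.Scan() {
		fmt.Println(sc.Text())
	}

	// math/rand: Read fills a byte slice with pseudo-random bytes.
	b := make([]byte, 8)
	rand.Read(b)
	fmt.Printf("%x\n", b)

	// strconv: QuoteToGraphic leaves graphic characters (including non-ASCII)
	// unescaped and escapes the rest.
	fmt.Println(strconv.QuoteToGraphic("héllo\tworld"))
}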
@@ -19,6 +19,16 @@ Go 1.5 is a major release of Go.
Read the <a href="/doc/go1.5">Go 1.5 Release Notes</a> for more information.
</p>

<h3 id="go1.5.minor">Minor revisions</h3>

<p>
go1.5.1 (released 2015/09/08) includes bug fixes to the compiler, assembler, and
the <code>fmt</code>, <code>net/textproto</code>, <code>net/http</code>, and
<code>runtime</code> packages.
See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.5.1">Go
1.5.1 milestone</a> on our issue tracker for details.
</p>

<h2 id="go1.4">go1.4 (released 2014/12/10)</h2>

<p>

@@ -38,6 +48,11 @@ go1.4.2 (released 2015/02/17) includes bug fixes to the <code>go</code> command,
See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.4.2">Go 1.4.2 milestone on our issue tracker</a> for details.
</p>

<p>
go1.4.3 (released 2015/09/22) includes security fixes to the <code>net/http</code> package and bug fixes to the <code>runtime</code> package.
See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.4.3">Go 1.4.3 milestone on our issue tracker</a> for details.
</p>

<h2 id="go1.3">go1.3 (released 2014/06/18)</h2>

<p>

@@ -1,6 +1,7 @@
Tools:

cmd/go: vendoring enabled by default (https://golang.org/cl/13967/)
cmd/go: flags for tests must precede package name if present; also makes it easier to pass flags to test binaries (https://golang.org/cl/14826)

Ports:

@@ -8,5 +9,15 @@ NaCl is no longer restricted to pepper_41 (https://golang.org/cl/13958/)

API additions and behavior changes:

strconv: QuoteTOGraphic (https://golang.org/cl/14184/)
bufio: add Scanner.Buffer (https://golang.org/cl/14599/)
bufio: add ErrFinalToken as a sentinel value for Scan's split functions (https://golang.org/cl/14924)
fmt: allow any integer type as an argument to the * operator (https://golang.org/cl/14491/)
math/rand: add Read (https://golang.org/cl/14522)
net/url: make *url.Error implement net.Error (https://golang.org/cl/15672)
strconv: QuoteToGraphic (https://golang.org/cl/14184/)
text/template: ExecError (https://golang.org/cl/13957/)
text/template: trimming spaces (https://golang.org/cl/14391/)
text/template: Funcs check names (https://golang.org/cl/14562/)
text/template: IsTrue (https://golang.org/cl/14562/)
text/template: blocks and permit redefinition (https://golang.org/cl/14005)
time: allow one and two-digit days of the month during Parse (https://golang.org/cl/14123/)
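One of the entries above, text/template's ExecError, lets callers tell template execution failures apart from other errors. A minimal sketch, assuming Go 1.6 or newer; the template text is illustrative.

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	// Indexing out of range fails at execution time, so Execute returns an
	// ExecError naming the template that was running.
	tmpl := template.Must(template.New("demo").Parse("{{index . 5}}"))
	err := tmpl.Execute(os.Stdout, []int{1, 2, 3})
	if execErr, ok := err.(template.ExecError); ok {
		fmt.Printf("template %q failed: %v\n", execErr.Name, execErr.Err)
	}
}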
@@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of August 5, 2015",
"Subtitle": "Version of September 24, 2015",
"Path": "/ref/spec"
}-->

@@ -2210,9 +2210,8 @@ math.Sin // denotes the Sin function in package math
<p>
Composite literals construct values for structs, arrays, slices, and maps
and create a new value each time they are evaluated.
They consist of the type of the value
followed by a brace-bound list of composite elements. An element may be
a single expression or a key-value pair.
They consist of the type of the literal followed by a brace-bound list of elements.
Each element may optionally be preceded by a corresponding key.
</p>

<pre class="ebnf">

@@ -2220,19 +2219,19 @@ CompositeLit = LiteralType LiteralValue .
LiteralType = StructType | ArrayType | "[" "..." "]" ElementType |
SliceType | MapType | TypeName .
LiteralValue = "{" [ ElementList [ "," ] ] "}" .
ElementList = Element { "," Element } .
Element = [ Key ":" ] Value .
ElementList = KeyedElement { "," KeyedElement } .
KeyedElement = [ Key ":" ] Element .
Key = FieldName | Expression | LiteralValue .
FieldName = identifier .
Value = Expression | LiteralValue .
Element = Expression | LiteralValue .
</pre>

<p>
The LiteralType must be a struct, array, slice, or map type
The LiteralType's underlying type must be a struct, array, slice, or map type
(the grammar enforces this constraint except when the type is given
as a TypeName).
The types of the expressions must be <a href="#Assignability">assignable</a>
to the respective field, element, and key types of the LiteralType;
The types of the elements and keys must be <a href="#Assignability">assignable</a>
to the respective field, element, and key types of the literal type;
there is no additional conversion.
The key is interpreted as a field name for struct literals,
an index for array and slice literals, and a key for map literals.

@@ -2245,7 +2244,7 @@ constant key value.
For struct literals the following rules apply:
</p>
<ul>
<li>A key must be a field name declared in the LiteralType.
<li>A key must be a field name declared in the struct type.
</li>
<li>An element list that does not contain any keys must
list an element for each struct field in the

@@ -2307,7 +2306,7 @@ var pointer *Point3D = &Point3D{y: 1000}
</pre>

<p>
The length of an array literal is the length specified in the LiteralType.
The length of an array literal is the length specified in the literal type.
If fewer elements than the length are provided in the literal, the missing
elements are set to the zero value for the array element type.
It is an error to provide elements with index values outside the index range
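The reworded spec text above describes composite literals as lists of optionally keyed elements. A short illustration of keyed and unkeyed elements in struct, array, and map literals (type and variable names are made up):

package main

import "fmt"

type point struct{ X, Y int }

func main() {
	unkeyed := point{1, 2}              // one element per field, in order
	keyed := point{Y: 5}                // keys are field names; X gets its zero value
	arr := [5]int{0: 1, 4: 9}           // keys are indices; missing elements are zero
	m := map[string]int{"a": 1, "b": 2} // map literals always use keys
	fmt.Println(unkeyed, keyed, arr, m)
}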
@@ -167,7 +167,7 @@ Then clone the repository and check out the latest release tag:</p>
<pre>
$ git clone https://go.googlesource.com/go
$ cd go
$ git checkout go1.5
$ git checkout go1.5.1
</pre>

<h2 id="head">(Optional) Switch to the master branch</h2>

@@ -346,7 +346,7 @@ New releases are announced on the
<a href="//groups.google.com/group/golang-announce">golang-announce</a>
mailing list.
Each announcement mentions the latest release tag, for instance,
<code>go1.5</code>.
<code>go1.5.1</code>.
</p>

<p>
misc/cgo/errors/issue11097a.go (new file, 15 lines)

@@ -0,0 +1,15 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

/*
//enum test { foo, bar };
*/
import "C"

func main() {
	var a = C.enum_test(1) // ERROR HERE
	_ = a
}

misc/cgo/errors/issue11097b.go (new file, 15 lines)

@@ -0,0 +1,15 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

/*
//enum test { foo, bar };
*/
import "C"

func main() {
	p := new(C.enum_test) // ERROR HERE
	_ = p
}

@@ -31,6 +31,8 @@ check err2.go
check err3.go
check issue7757.go
check issue8442.go
check issue11097a.go
check issue11097b.go

rm -rf errs _obj
exit 0
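The two issue11097 files check that cgo reports an error when Go code refers to an enum that only appears inside a commented-out declaration. For contrast, a hedged sketch of the working case, where the enum really is declared in the cgo preamble (the enum name and values here are invented):

package main

/*
enum color { RED, GREEN, BLUE };
*/
import "C"

import "fmt"

func main() {
	// C.enum_color is usable because the declaration above is not commented out.
	var c C.enum_color = C.GREEN
	fmt.Println("GREEN has value", c) // prints 1
}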
@@ -22,7 +22,7 @@ func testBuildID(t *testing.T) {
	if os.IsNotExist(err) {
		t.Skip("no /proc/self/exe")
	}
	t.Fatalf("opening /proc/self/exe: ", err)
	t.Fatal("opening /proc/self/exe: ", err)
}
defer f.Close()

@@ -177,7 +177,7 @@ func testCallbackCallers(t *testing.T) {
for i := 0; i < n; i++ {
	f := runtime.FuncForPC(pc[i])
	if f == nil {
		t.Fatalf("expected non-nil Func for pc %p", pc[i])
		t.Fatalf("expected non-nil Func for pc %d", pc[i])
	}
	fname := f.Name()
	// Remove the prepended pathname from automatically

@@ -65,5 +65,6 @@ func Test9026(t *testing.T) { test9026(t) }
func Test9557(t *testing.T) { test9557(t) }
func Test10303(t *testing.T) { test10303(t, 10) }
func Test11925(t *testing.T) { test11925(t) }
func Test12030(t *testing.T) { test12030(t) }

func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) }

@@ -31,7 +31,7 @@ func testSetEnv(t *testing.T) {
	keyc := C.CString(key)
	defer C.free(unsafe.Pointer(keyc))
	v := C.getenv(keyc)
	if v == (*C.char)(unsafe.Pointer(uintptr(0))) {
	if uintptr(unsafe.Pointer(v)) == 0 {
		t.Fatal("getenv returned NULL")
	}
	vs := C.GoString(v)
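The first hunk above replaces t.Fatalf with t.Fatal because its format string has no verb for the extra argument. A small reminder sketch of the difference (helper names are made up):

package demo

import "testing"

func report(t *testing.T, err error) {
	// Fatalf interprets its first argument as a format string, so it needs a verb.
	t.Fatalf("opening /proc/self/exe: %v", err)
}

func reportPlain(t *testing.T, err error) {
	// Fatal simply concatenates its arguments, so no format verbs are involved.
	t.Fatal("opening /proc/self/exe: ", err)
}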
misc/cgo/test/issue12030.go (new file, 35 lines)

@@ -0,0 +1,35 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Issue 12030. sprintf is defined in both ntdll and msvcrt,
// Normally we want the one in the msvcrt.

package cgotest

/*
#include <stdio.h>
#include <stdlib.h>
void issue12030conv(char *buf, double x) {
	sprintf(buf, "d=%g", x);
}
*/
import "C"

import (
	"fmt"
	"testing"
	"unsafe"
)

func test12030(t *testing.T) {
	buf := (*C.char)(C.malloc(256))
	defer C.free(unsafe.Pointer(buf))
	for _, f := range []float64{1.0, 2.0, 3.14} {
		C.issue12030conv(buf, C.double(f))
		got := C.GoString(buf)
		if want := fmt.Sprintf("d=%g", f); got != want {
			t.Fatalf("C.sprintf failed for %g: %q != %q", f, got, want)
		}
	}
}
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.

// Test that pthread_cancel works as expected
// (NPTL uses SIGRTMIN to implement thread cancellation)
// (NPTL uses SIGRTMIN to implement thread cancelation)
// See https://golang.org/issue/6997
package cgotest

@@ -32,7 +32,7 @@ func test6997(t *testing.T) {
select {
case r = <-c:
	if r == 0 {
		t.Error("pthread finished but wasn't cancelled??")
		t.Error("pthread finished but wasn't canceled??")
	}
case <-time.After(30 * time.Second):
	t.Error("hung in pthread_cancel/pthread_join")
@@ -19,7 +19,7 @@ var bad7665 unsafe.Pointer = C.f7665
var good7665 uintptr = uintptr(C.f7665)

func test7665(t *testing.T) {
	if bad7665 == nil || bad7665 != unsafe.Pointer(good7665) {
	if bad7665 == nil || uintptr(bad7665) != good7665 {
		t.Errorf("ptrs = %p, %#x, want same non-nil pointer", bad7665, good7665)
	}
}

@@ -7,7 +7,7 @@

#include "textflag.h"

TEXT ·RewindAndSetgid(SB),NOSPLIT,$-8-0
TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0
	// Rewind stack pointer so anything that happens on the stack
	// will clobber the test pattern created by the caller
	ADD $(1024 * 8), R1
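The test7665 change above compares pointers by converting the unsafe.Pointer to uintptr rather than building an unsafe.Pointer from an integer. A hedged sketch of the same idea (the helper is illustrative, not part of the test):

package demo

import "unsafe"

// samePointer reports whether p and the address recorded in addr refer to the
// same location. Converting the pointer to uintptr for the comparison avoids
// manufacturing an unsafe.Pointer from an arbitrary integer, which the rules
// of the unsafe package do not allow in general.
func samePointer(p unsafe.Pointer, addr uintptr) bool {
	return p != nil && uintptr(p) == addr
}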
@@ -14,11 +14,14 @@ package cgotest
import "C"

import (
	"os"
	"os/signal"
	"syscall"
	"testing"
	"time"
)

func testSetgid(t *testing.T) {
func runTestSetgid() bool {
	c := make(chan bool)
	go func() {
		C.setgid(0)

@@ -26,7 +29,21 @@ func testSetgid(t *testing.T) {
	}()
	select {
	case <-c:
		return true
	case <-time.After(5 * time.Second):
		return false
	}

}

func testSetgid(t *testing.T) {
	if !runTestSetgid() {
		t.Error("setgid hung")
	}

	// Now try it again after using signal.Notify.
	signal.Notify(make(chan os.Signal, 1), syscall.SIGINT)
	if !runTestSetgid() {
		t.Error("setgid hung after signal.Notify")
	}
}
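The refactored test repeats the setgid check after installing a handler with signal.Notify, since registering for signals changes how the runtime's threads receive them. For reference, a minimal standalone sketch of signal.Notify itself:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Relay SIGINT to a buffered channel instead of the default behavior.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT)

	fmt.Println("press Ctrl+C to exit")
	s := <-ch // block until a signal arrives
	fmt.Println("got signal:", s)
}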
@@ -25,7 +25,7 @@ int main(void) {
	n = read(fd, buf, sizeof buf);
	if (n >= 0)
		break;
	if (errno != EBADF) {
	if (errno != EBADF && errno != EINVAL) {
		fprintf(stderr, "BUG: read: %s\n", strerror(errno));
		return 2;
	}
@@ -20,7 +20,7 @@ goarch=$(go env GOARCH)
# Directory where cgo headers and outputs will be installed.
# The installation directory format varies depending on the platform.
installdir=pkg/${goos}_${goarch}_testcshared_shared
if [ "${goos}/${goarch}" == "android/arm" ] || [ "${goos}/${goarch}" == "darwin/amd64" ]; then
if [ "${goos}/${goarch}" == "darwin/amd64" ]; then
	installdir=pkg/${goos}_${goarch}_testcshared
fi

@@ -81,9 +81,21 @@ GOPATH=$(pwd) go install -buildmode=c-shared $suffix libgo
GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo.$libext src/libgo/libgo.go
binpush libgo.$libext

if [ "$goos" == "linux" ]; then
	if readelf -d libgo.$libext | grep TEXTREL >/dev/null; then
		echo "libgo.$libext has TEXTREL set"
		exit 1
	fi
fi

GOGCCFLAGS=$(go env GOGCCFLAGS)
if [ "$goos" == "android" ]; then
	GOGCCFLAGS="${GOGCCFLAGS} -pie"
fi

# test0: exported symbols in shared lib are accessible.
# TODO(iant): using _shared here shouldn't really be necessary.
$(go env CC) $(go env GOGCCFLAGS) -I ${installdir} -o testp main0.c libgo.$libext
$(go env CC) ${GOGCCFLAGS} -I ${installdir} -o testp main0.c libgo.$libext
binpush testp

output=$(run LD_LIBRARY_PATH=. ./testp)

@@ -93,7 +105,7 @@ if [ "$output" != "PASS" ]; then
fi

# test1: shared library can be dynamically loaded and exported symbols are accessible.
$(go env CC) $(go env GOGCCFLAGS) -o testp main1.c -ldl
$(go env CC) ${GOGCCFLAGS} -o testp main1.c -ldl
binpush testp
output=$(run ./testp ./libgo.$libext)
if [ "$output" != "PASS" ]; then

@@ -108,7 +120,7 @@ linkflags="-Wl,--no-as-needed"
if [ "$goos" == "darwin" ]; then
	linkflags=""
fi
$(go env CC) $(go env GOGCCFLAGS) -o testp2 main2.c $linkflags libgo2.$libext
$(go env CC) ${GOGCCFLAGS} -o testp2 main2.c $linkflags libgo2.$libext
binpush testp2
output=$(run LD_LIBRARY_PATH=. ./testp2)
if [ "$output" != "PASS" ]; then

@@ -118,7 +130,7 @@ fi

# test3: tests main.main is exported on android.
if [ "$goos" == "android" ]; then
	$(go env CC) $(go env GOGCCFLAGS) -o testp3 main3.c -ldl
	$(go env CC) ${GOGCCFLAGS} -o testp3 main3.c -ldl
	binpush testp3
	output=$(run ./testp ./libgo.so)
	if [ "$output" != "PASS" ]; then
34
misc/cgo/testsanitizers/msan.go
Normal file
@ -0,0 +1,34 @@
package main

/*
#cgo CFLAGS: -fsanitize=memory
#cgo LDFLAGS: -fsanitize=memory

#include <stdint.h>

void f(int32_t *p, int n) {
	int i;

	for (i = 0; i < n; i++) {
		p[i] = (int32_t)i;
	}
}
*/
import "C"

import (
	"fmt"
	"os"
	"unsafe"
)

func main() {
	a := make([]int32, 10)
	C.f((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a)))
	for i, v := range a {
		if i != int(v) {
			fmt.Println("bad %d: %v\n", i, a)
			os.Exit(1)
		}
	}
}
34
misc/cgo/testsanitizers/test.bash
Executable file
@ -0,0 +1,34 @@
#!/usr/bin/env bash
# Copyright 2015 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

# This directory is intended to test the use of Go with sanitizers
# like msan, asan, etc. See https://github.com/google/sanitizers .

set -e

# The sanitizers were originally developed with clang, so prefer it.
CC=cc
if test "$(type -p clang)" != ""; then
	CC=clang
fi
export CC

if $CC -fsanitize=memory 2>&1 | grep "unrecognized" >& /dev/null; then
	echo "skipping msan test: -fsanitize=memory not supported"
	exit 0
fi

# The memory sanitizer in versions of clang before 3.6 don't work with Go.
if $CC --version | grep clang >& /dev/null; then
	ver=$($CC --version | sed -e 's/.* version \([0-9.-]*\).*/\1/')
	major=$(echo $ver | sed -e 's/\([0-9]*\).*/\1/')
	minor=$(echo $ver | sed -e 's/[0-9]*\.\([0-9]*\).*/\1/')
	if test $major -lt 3 || test $major -eq 3 -a $minor -lt 6; then
		echo "skipping msan test; clang version $major.$minor older than 3.6"
		exit 0
	fi
fi

go run msan.go
@ -163,6 +163,45 @@ func TestSOBuilt(t *testing.T) {
	}
}

func hasDynTag(f *elf.File, tag elf.DynTag) bool {
	ds := f.SectionByType(elf.SHT_DYNAMIC)
	if ds == nil {
		return false
	}
	d, err := ds.Data()
	if err != nil {
		return false
	}
	for len(d) > 0 {
		var t elf.DynTag
		switch f.Class {
		case elf.ELFCLASS32:
			t = elf.DynTag(f.ByteOrder.Uint32(d[0:4]))
			d = d[8:]
		case elf.ELFCLASS64:
			t = elf.DynTag(f.ByteOrder.Uint64(d[0:8]))
			d = d[16:]
		}
		if t == tag {
			return true
		}
	}
	return false
}

// The shared library does not have relocations against the text segment.
func TestNoTextrel(t *testing.T) {
	sopath := filepath.Join(gorootInstallDir, soname)
	f, err := elf.Open(sopath)
	if err != nil {
		t.Fatal("elf.Open failed: ", err)
	}
	defer f.Close()
	if hasDynTag(f, elf.DT_TEXTREL) {
		t.Errorf("%s has DT_TEXTREL set", soname)
	}
}

// The install command should have created a "shlibname" file for the
// listed packages (and runtime/cgo) indicating the name of the shared
// library containing it.
@ -55,6 +55,9 @@ func detectDevID() string {
		if !bytes.Contains(line, []byte("iPhone Developer")) {
			continue
		}
		if bytes.Contains(line, []byte("REVOKED")) {
			continue
		}
		fields := bytes.Fields(line)
		return string(fields[1])
	}
@ -12,6 +12,7 @@ import (
	"errors"
	"io"
	"io/ioutil"
	"math"
	"os"
	"strconv"
	"strings"
@ -49,12 +50,36 @@ type regFileReader struct {
	nb int64 // number of unread bytes for current file entry
}

// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive.
// A sparseFileReader is a numBytesReader for reading sparse file data from a
// tar archive.
type sparseFileReader struct {
	rfr *regFileReader // reads the sparse-encoded file data
	sp  []sparseEntry  // the sparse map for the file
	pos int64          // keeps track of file position
	tot int64          // total size of the file
	rfr   numBytesReader // Reads the sparse-encoded file data
	sp    []sparseEntry  // The sparse map for the file
	pos   int64          // Keeps track of file position
	total int64          // Total size of the file
}

// A sparseEntry holds a single entry in a sparse file's sparse map.
//
// Sparse files are represented using a series of sparseEntrys.
// Despite the name, a sparseEntry represents an actual data fragment that
// references data found in the underlying archive stream. All regions not
// covered by a sparseEntry are logically filled with zeros.
//
// For example, if the underlying raw file contains the 10-byte data:
//	var compactData = "abcdefgh"
//
// And the sparse map has the following entries:
//	var sp = []sparseEntry{
//		{offset: 2, numBytes: 5} // Data fragment for [2..7]
//		{offset: 18, numBytes: 3} // Data fragment for [18..21]
//	}
//
// Then the content of the resulting sparse file with a "real" size of 25 is:
//	var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
type sparseEntry struct {
	offset   int64 // Starting position of the fragment
	numBytes int64 // Length of the fragment
}

// Keywords for GNU sparse files in a PAX extended header
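The doc comment above describes how a sparse map expands compact archive data into the logical file. The following standalone sketch (not part of the commit; the expand helper is illustrative, while compactData, sp, and the size 25 come from the comment's example) performs that expansion:

package main

import "fmt"

type sparseEntry struct{ offset, numBytes int64 }

// expand reconstructs the logical file from the compact data and sparse map,
// filling every region not covered by an entry with zero bytes.
func expand(compact string, sp []sparseEntry, realSize int64) string {
	out := make([]byte, realSize)
	var used int64
	for _, s := range sp {
		copy(out[s.offset:s.offset+s.numBytes], compact[used:used+s.numBytes])
		used += s.numBytes
	}
	return string(out)
}

func main() {
	compactData := "abcdefgh"
	sp := []sparseEntry{{offset: 2, numBytes: 5}, {offset: 18, numBytes: 3}}
	sparseData := expand(compactData, sp, 25)
	fmt.Printf("%q\n", sparseData) // "\x00\x00abcde" + 11 zeros + "fgh" + 4 zeros
}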
@ -128,7 +153,10 @@ func (tr *Reader) Next() (*Header, error) {
		if sp != nil {
			// Current file is a PAX format GNU sparse file.
			// Set the current file reader to a sparse file reader.
			tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
			tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
			if tr.err != nil {
				return nil, tr.err
			}
		}
		return hdr, nil
	case TypeGNULongName:
@ -137,18 +165,24 @@ func (tr *Reader) Next() (*Header, error) {
		if err != nil {
			return nil, err
		}
		hdr, err := tr.Next()
		hdr, tr.err = tr.Next()
		if tr.err != nil {
			return nil, tr.err
		}
		hdr.Name = cString(realname)
		return hdr, err
		return hdr, nil
	case TypeGNULongLink:
		// We have a GNU long link header.
		realname, err := ioutil.ReadAll(tr)
		if err != nil {
			return nil, err
		}
		hdr, err := tr.Next()
		hdr, tr.err = tr.Next()
		if tr.err != nil {
			return nil, tr.err
		}
		hdr.Linkname = cString(realname)
		return hdr, err
		return hdr, nil
	}
	return hdr, tr.err
}
@ -541,21 +575,17 @@ func (tr *Reader) readHeader() *Header {
		if tr.err != nil {
			return nil
		}

		// Current file is a GNU sparse file. Update the current file reader.
		tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
		tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
		if tr.err != nil {
			return nil
		}
	}

	return hdr
}

// A sparseEntry holds a single entry in a sparse file's sparse map.
// A sparse entry indicates the offset and size in a sparse file of a
// block of data.
type sparseEntry struct {
	offset   int64
	numBytes int64
}

// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
// then one or more extension headers are used to store the rest of the sparse map.
@ -688,40 +718,37 @@ func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
	return sp, nil
}

// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1.
// The sparse map is stored in the PAX headers.
func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) {
	// Get number of entries
	numEntriesStr, ok := headers[paxGNUSparseNumBlocks]
	if !ok {
		return nil, ErrHeader
	}
	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0)
	if err != nil {
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
// version 0.1. The sparse map is stored in the PAX headers.
func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
	// Get number of entries.
	// Use integer overflow resistant math to check this.
	numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
		return nil, ErrHeader
	}

	sparseMap := strings.Split(headers[paxGNUSparseMap], ",")

	// There should be two numbers in sparseMap for each entry
	// There should be two numbers in sparseMap for each entry.
	sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
	if int64(len(sparseMap)) != 2*numEntries {
		return nil, ErrHeader
	}

	// Loop through the entries in the sparse map
	// Loop through the entries in the sparse map.
	// numEntries is trusted now.
	sp := make([]sparseEntry, 0, numEntries)
	for i := int64(0); i < numEntries; i++ {
		offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0)
		offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
		if err != nil {
			return nil, ErrHeader
		}
		numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0)
		numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
		if err != nil {
			return nil, ErrHeader
		}
		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
	}

	return sp, nil
}
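The rewritten readGNUSparseMap0x1 guards against a hostile entry count with overflow-resistant math: if doubling the count wraps the native int, int(2*n) compares less than int(n) and the header is rejected before a slice of that size is allocated. A small standalone sketch of just that check (the helper name is illustrative, not from the commit):

package main

import "fmt"

// tooManyEntries mirrors the overflow-resistant check above: a negative count
// or a count whose doubling wraps the native int size is rejected.
func tooManyEntries(n int64) bool {
	return n < 0 || int(2*n) < int(n)
}

func main() {
	const maxUint = ^uint(0)
	const maxInt = int(maxUint >> 1)

	fmt.Println(tooManyEntries(4))                  // false: a sane count
	fmt.Println(tooManyEntries(-1))                 // true: negative counts are rejected
	fmt.Println(tooManyEntries(int64(maxInt)/2 + 1)) // true: 2*n overflows int and wraps negative
}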
@ -739,9 +766,13 @@ func (tr *Reader) numBytes() int64 {
// It returns 0, io.EOF when it reaches the end of that entry,
// until Next is called to advance to the next entry.
func (tr *Reader) Read(b []byte) (n int, err error) {
	if tr.err != nil {
		return 0, tr.err
	}
	if tr.curr == nil {
		return 0, io.EOF
	}

	n, err = tr.curr.Read(b)
	if err != nil && err != io.EOF {
		tr.err = err
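For context, this is the public surface those internal readers feed: Next advances to each archive entry, and Read (here driven by io.Copy) returns io.EOF at the end of that entry's data. A self-contained usage sketch of the archive/tar API (the file name and contents are made up for the example):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// Build a tiny archive in memory so the example is self-contained.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := "hello tar"
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(body))}); err != nil {
		log.Fatal(err)
	}
	if _, err := io.WriteString(tw, body); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}

	// Walk the archive: Next advances to each entry, and reading the
	// Reader yields that entry's data until io.EOF.
	tr := tar.NewReader(&buf)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: ", hdr.Name)
		if _, err := io.Copy(os.Stdout, tr); err != nil {
			log.Fatal(err)
		}
		fmt.Println()
	}
}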
@ -771,9 +802,33 @@ func (rfr *regFileReader) numBytes() int64 {
	return rfr.nb
}

// readHole reads a sparse file hole ending at offset toOffset
func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
	n64 := toOffset - sfr.pos
// newSparseFileReader creates a new sparseFileReader, but validates all of the
// sparse entries before doing so.
func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
	if total < 0 {
		return nil, ErrHeader // Total size cannot be negative
	}

	// Validate all sparse entries. These are the same checks as performed by
	// the BSD tar utility.
	for i, s := range sp {
		switch {
		case s.offset < 0 || s.numBytes < 0:
			return nil, ErrHeader // Negative values are never okay
		case s.offset > math.MaxInt64-s.numBytes:
			return nil, ErrHeader // Integer overflow with large length
		case s.offset+s.numBytes > total:
			return nil, ErrHeader // Region extends beyond the "real" size
		case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
			return nil, ErrHeader // Regions can't overlap and must be in order
		}
	}
	return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
}

// readHole reads a sparse hole ending at endOffset.
func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
	n64 := endOffset - sfr.pos
	if n64 > int64(len(b)) {
		n64 = int64(len(b))
	}
@ -787,49 +842,54 @@ func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {

// Read reads the sparse file data in expanded form.
func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
	if len(sfr.sp) == 0 {
		// No more data fragments to read from.
		if sfr.pos < sfr.tot {
			// We're in the last hole
			n = sfr.readHole(b, sfr.tot)
			return
		}
		// Otherwise, we're at the end of the file
		return 0, io.EOF
	}
	if sfr.tot < sfr.sp[0].offset {
		return 0, io.ErrUnexpectedEOF
	}
	if sfr.pos < sfr.sp[0].offset {
		// We're in a hole
		n = sfr.readHole(b, sfr.sp[0].offset)
		return
	// Skip past all empty fragments.
	for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
		sfr.sp = sfr.sp[1:]
	}

	// We're not in a hole, so we'll read from the next data fragment
	posInFragment := sfr.pos - sfr.sp[0].offset
	bytesLeft := sfr.sp[0].numBytes - posInFragment
	// If there are no more fragments, then it is possible that there
	// is one last sparse hole.
	if len(sfr.sp) == 0 {
		// This behavior matches the BSD tar utility.
		// However, GNU tar stops returning data even if sfr.total is unmet.
		if sfr.pos < sfr.total {
			return sfr.readHole(b, sfr.total), nil
		}
		return 0, io.EOF
	}

	// In front of a data fragment, so read a hole.
	if sfr.pos < sfr.sp[0].offset {
		return sfr.readHole(b, sfr.sp[0].offset), nil
	}

	// In a data fragment, so read from it.
	// This math is overflow free since we verify that offset and numBytes can
	// be safely added when creating the sparseFileReader.
	endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
	bytesLeft := endPos - sfr.pos                   // Bytes left in fragment
	if int64(len(b)) > bytesLeft {
		b = b[0:bytesLeft]
		b = b[:bytesLeft]
	}

	n, err = sfr.rfr.Read(b)
	sfr.pos += int64(n)

	if int64(n) == bytesLeft {
		// We're done with this fragment
		sfr.sp = sfr.sp[1:]
	if err == io.EOF {
		if sfr.pos < endPos {
			err = io.ErrUnexpectedEOF // There was supposed to be more data
		} else if sfr.pos < sfr.total {
			err = nil // There is still an implicit sparse hole at the end
		}
	}

	if err == io.EOF && sfr.pos < sfr.tot {
		// We reached the end of the last fragment's data, but there's a final hole
		err = nil
	if sfr.pos == endPos {
		sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
	}
	return
	return n, err
}

// numBytes returns the number of bytes left to read in the sparse file's
// sparse-encoded data in the tar archive.
func (sfr *sparseFileReader) numBytes() int64 {
	return sfr.rfr.nb
	return sfr.rfr.numBytes()
}
@ -10,6 +10,7 @@ import (
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"os"
	"reflect"
	"strings"

@ -18,9 +19,10 @@ import (
)

type untarTest struct {
	file    string
	headers []*Header
	cksums  []string
	file    string    // Test input file
	headers []*Header // Expected output headers
	chksums []string  // MD5 checksum of files, leave as nil if not checked
	err     error     // Expected error to occur
}

var gnuTarTest = &untarTest{
@ -49,7 +51,7 @@ var gnuTarTest = &untarTest{
			Gname:    "eng",
		},
	},
	cksums: []string{
	chksums: []string{
		"e38b27eaccb4391bdec553a7f3ae6b2f",
		"c65bd2e50a56a2138bf1716f2fd56fe9",
	},
@ -129,7 +131,7 @@ var sparseTarTest = &untarTest{
			Devminor: 0,
		},
	},
	cksums: []string{
	chksums: []string{
		"6f53234398c2449fe67c1812d993012f",
		"6f53234398c2449fe67c1812d993012f",
		"6f53234398c2449fe67c1812d993012f",
@ -286,37 +288,101 @@ var untarTests = []*untarTest{
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
file: "testdata/neg-size.tar",
|
||||
err: ErrHeader,
|
||||
},
|
||||
{
|
||||
file: "testdata/issue10968.tar",
|
||||
err: ErrHeader,
|
||||
},
|
||||
{
|
||||
file: "testdata/issue11169.tar",
|
||||
// TODO(dsnet): Currently the library does not detect that this file is
|
||||
// malformed. Instead it incorrectly believes that file just ends.
|
||||
// At least the library doesn't crash anymore.
|
||||
// err: ErrHeader,
|
||||
},
|
||||
{
|
||||
file: "testdata/issue12435.tar",
|
||||
// TODO(dsnet): Currently the library does not detect that this file is
|
||||
// malformed. Instead, it incorrectly believes that file just ends.
|
||||
// At least the library doesn't crash anymore.
|
||||
// err: ErrHeader,
|
||||
},
|
||||
}
|
||||
|
||||
func TestReader(t *testing.T) {
|
||||
testLoop:
|
||||
for i, test := range untarTests {
|
||||
f, err := os.Open(test.file)
|
||||
for i, v := range untarTests {
|
||||
f, err := os.Open(v.file)
|
||||
if err != nil {
|
||||
t.Errorf("test %d: Unexpected error: %v", i, err)
|
||||
t.Errorf("file %s, test %d: unexpected error: %v", v.file, i, err)
|
||||
continue
|
||||
}
|
||||
defer f.Close()
|
||||
tr := NewReader(f)
|
||||
for j, header := range test.headers {
|
||||
hdr, err := tr.Next()
|
||||
if err != nil || hdr == nil {
|
||||
t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
|
||||
f.Close()
|
||||
continue testLoop
|
||||
}
|
||||
if !reflect.DeepEqual(*hdr, *header) {
|
||||
t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
|
||||
i, j, *hdr, *header)
|
||||
}
|
||||
}
|
||||
hdr, err := tr.Next()
|
||||
|
||||
// Capture all headers and checksums.
|
||||
var (
|
||||
tr = NewReader(f)
|
||||
hdrs []*Header
|
||||
chksums []string
|
||||
rdbuf = make([]byte, 8)
|
||||
)
|
||||
for {
|
||||
var hdr *Header
|
||||
hdr, err = tr.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
continue testLoop
|
||||
err = nil // Expected error
|
||||
}
|
||||
if hdr != nil || err != nil {
|
||||
t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, hdr, err)
|
||||
break
|
||||
}
|
||||
hdrs = append(hdrs, hdr)
|
||||
|
||||
if v.chksums == nil {
|
||||
continue
|
||||
}
|
||||
h := md5.New()
|
||||
_, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
chksums = append(chksums, fmt.Sprintf("%x", h.Sum(nil)))
|
||||
}
|
||||
|
||||
for j, hdr := range hdrs {
|
||||
if j >= len(v.headers) {
|
||||
t.Errorf("file %s, test %d, entry %d: unexpected header:\ngot %+v",
|
||||
v.file, i, j, *hdr)
|
||||
continue
|
||||
}
|
||||
if !reflect.DeepEqual(*hdr, *v.headers[j]) {
|
||||
t.Errorf("file %s, test %d, entry %d: incorrect header:\ngot %+v\nwant %+v",
|
||||
v.file, i, j, *hdr, *v.headers[j])
|
||||
}
|
||||
}
|
||||
if len(hdrs) != len(v.headers) {
|
||||
t.Errorf("file %s, test %d: got %d headers, want %d headers",
|
||||
v.file, i, len(hdrs), len(v.headers))
|
||||
}
|
||||
|
||||
for j, sum := range chksums {
|
||||
if j >= len(v.chksums) {
|
||||
t.Errorf("file %s, test %d, entry %d: unexpected sum: got %s",
|
||||
v.file, i, j, sum)
|
||||
continue
|
||||
}
|
||||
if sum != v.chksums[j] {
|
||||
t.Errorf("file %s, test %d, entry %d: incorrect checksum: got %s, want %s",
|
||||
v.file, i, j, sum, v.chksums[j])
|
||||
}
|
||||
}
|
||||
|
||||
if err != v.err {
|
||||
t.Errorf("file %s, test %d: unexpected error: got %v, want %v",
|
||||
v.file, i, err, v.err)
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -356,60 +422,6 @@ func TestPartialRead(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestIncrementalRead(t *testing.T) {
|
||||
test := gnuTarTest
|
||||
f, err := os.Open(test.file)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
tr := NewReader(f)
|
||||
|
||||
headers := test.headers
|
||||
cksums := test.cksums
|
||||
nread := 0
|
||||
|
||||
// loop over all files
|
||||
for ; ; nread++ {
|
||||
hdr, err := tr.Next()
|
||||
if hdr == nil || err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
// check the header
|
||||
if !reflect.DeepEqual(*hdr, *headers[nread]) {
|
||||
t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
|
||||
*hdr, headers[nread])
|
||||
}
|
||||
|
||||
// read file contents in little chunks EOF,
|
||||
// checksumming all the way
|
||||
h := md5.New()
|
||||
rdbuf := make([]uint8, 8)
|
||||
for {
|
||||
nr, err := tr.Read(rdbuf)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("Read: unexpected error %v\n", err)
|
||||
break
|
||||
}
|
||||
h.Write(rdbuf[0:nr])
|
||||
}
|
||||
// verify checksum
|
||||
have := fmt.Sprintf("%x", h.Sum(nil))
|
||||
want := cksums[nread]
|
||||
if want != have {
|
||||
t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
|
||||
}
|
||||
}
|
||||
if nread != len(headers) {
|
||||
t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNonSeekable(t *testing.T) {
|
||||
test := gnuTarTest
|
||||
f, err := os.Open(test.file)
|
||||
|
|
@ -514,187 +526,232 @@ func TestMergePAX(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSparseEndToEnd(t *testing.T) {
|
||||
test := sparseTarTest
|
||||
f, err := os.Open(test.file)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
tr := NewReader(f)
|
||||
|
||||
headers := test.headers
|
||||
cksums := test.cksums
|
||||
nread := 0
|
||||
|
||||
// loop over all files
|
||||
for ; ; nread++ {
|
||||
hdr, err := tr.Next()
|
||||
if hdr == nil || err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
// check the header
|
||||
if !reflect.DeepEqual(*hdr, *headers[nread]) {
|
||||
t.Errorf("Incorrect header:\nhave %+v\nwant %+v",
|
||||
*hdr, headers[nread])
|
||||
}
|
||||
|
||||
// read and checksum the file data
|
||||
h := md5.New()
|
||||
_, err = io.Copy(h, tr)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// verify checksum
|
||||
have := fmt.Sprintf("%x", h.Sum(nil))
|
||||
want := cksums[nread]
|
||||
if want != have {
|
||||
t.Errorf("Bad checksum on file %s:\nhave %+v\nwant %+v", hdr.Name, have, want)
|
||||
}
|
||||
}
|
||||
if nread != len(headers) {
|
||||
t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(headers), nread)
|
||||
}
|
||||
}
|
||||
|
||||
type sparseFileReadTest struct {
|
||||
sparseData []byte
|
||||
sparseMap []sparseEntry
|
||||
realSize int64
|
||||
expected []byte
|
||||
}
|
||||
|
||||
var sparseFileReadTests = []sparseFileReadTest{
|
||||
{
|
||||
sparseData: []byte("abcde"),
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 0, numBytes: 2},
|
||||
{offset: 5, numBytes: 3},
|
||||
},
|
||||
realSize: 8,
|
||||
expected: []byte("ab\x00\x00\x00cde"),
|
||||
},
|
||||
{
|
||||
sparseData: []byte("abcde"),
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 0, numBytes: 2},
|
||||
{offset: 5, numBytes: 3},
|
||||
},
|
||||
realSize: 10,
|
||||
expected: []byte("ab\x00\x00\x00cde\x00\x00"),
|
||||
},
|
||||
{
|
||||
sparseData: []byte("abcde"),
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 1, numBytes: 3},
|
||||
{offset: 6, numBytes: 2},
|
||||
},
|
||||
realSize: 8,
|
||||
expected: []byte("\x00abc\x00\x00de"),
|
||||
},
|
||||
{
|
||||
sparseData: []byte("abcde"),
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 1, numBytes: 3},
|
||||
{offset: 6, numBytes: 2},
|
||||
},
|
||||
realSize: 10,
|
||||
expected: []byte("\x00abc\x00\x00de\x00\x00"),
|
||||
},
|
||||
{
|
||||
sparseData: []byte(""),
|
||||
sparseMap: nil,
|
||||
realSize: 2,
|
||||
expected: []byte("\x00\x00"),
|
||||
},
|
||||
}
|
||||
|
||||
func TestSparseFileReader(t *testing.T) {
|
||||
for i, test := range sparseFileReadTests {
|
||||
r := bytes.NewReader(test.sparseData)
|
||||
nb := int64(r.Len())
|
||||
sfr := &sparseFileReader{
|
||||
rfr: ®FileReader{r: r, nb: nb},
|
||||
sp: test.sparseMap,
|
||||
pos: 0,
|
||||
tot: test.realSize,
|
||||
}
|
||||
if sfr.numBytes() != nb {
|
||||
t.Errorf("test %d: Before reading, sfr.numBytes() = %d, want %d", i, sfr.numBytes(), nb)
|
||||
}
|
||||
buf, err := ioutil.ReadAll(sfr)
|
||||
var vectors = []struct {
|
||||
realSize int64 // Real size of the output file
|
||||
sparseMap []sparseEntry // Input sparse map
|
||||
sparseData string // Input compact data
|
||||
expected string // Expected output data
|
||||
err error // Expected error outcome
|
||||
}{{
|
||||
realSize: 8,
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 0, numBytes: 2},
|
||||
{offset: 5, numBytes: 3},
|
||||
},
|
||||
sparseData: "abcde",
|
||||
expected: "ab\x00\x00\x00cde",
|
||||
}, {
|
||||
realSize: 10,
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 0, numBytes: 2},
|
||||
{offset: 5, numBytes: 3},
|
||||
},
|
||||
sparseData: "abcde",
|
||||
expected: "ab\x00\x00\x00cde\x00\x00",
|
||||
}, {
|
||||
realSize: 8,
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 1, numBytes: 3},
|
||||
{offset: 6, numBytes: 2},
|
||||
},
|
||||
sparseData: "abcde",
|
||||
expected: "\x00abc\x00\x00de",
|
||||
}, {
|
||||
realSize: 8,
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 1, numBytes: 3},
|
||||
{offset: 6, numBytes: 0},
|
||||
{offset: 6, numBytes: 0},
|
||||
{offset: 6, numBytes: 2},
|
||||
},
|
||||
sparseData: "abcde",
|
||||
expected: "\x00abc\x00\x00de",
|
||||
}, {
|
||||
realSize: 10,
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 1, numBytes: 3},
|
||||
{offset: 6, numBytes: 2},
|
||||
},
|
||||
sparseData: "abcde",
|
||||
expected: "\x00abc\x00\x00de\x00\x00",
|
||||
}, {
|
||||
realSize: 10,
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 1, numBytes: 3},
|
||||
{offset: 6, numBytes: 2},
|
||||
{offset: 8, numBytes: 0},
|
||||
{offset: 8, numBytes: 0},
|
||||
{offset: 8, numBytes: 0},
|
||||
{offset: 8, numBytes: 0},
|
||||
},
|
||||
sparseData: "abcde",
|
||||
expected: "\x00abc\x00\x00de\x00\x00",
|
||||
}, {
|
||||
realSize: 2,
|
||||
sparseMap: []sparseEntry{},
|
||||
sparseData: "",
|
||||
expected: "\x00\x00",
|
||||
}, {
|
||||
realSize: -2,
|
||||
sparseMap: []sparseEntry{},
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
realSize: -10,
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 1, numBytes: 3},
|
||||
{offset: 6, numBytes: 2},
|
||||
},
|
||||
sparseData: "abcde",
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
realSize: 10,
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 1, numBytes: 3},
|
||||
{offset: 6, numBytes: 5},
|
||||
},
|
||||
sparseData: "abcde",
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
realSize: 35,
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 1, numBytes: 3},
|
||||
{offset: 6, numBytes: 5},
|
||||
},
|
||||
sparseData: "abcde",
|
||||
err: io.ErrUnexpectedEOF,
|
||||
}, {
|
||||
realSize: 35,
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 1, numBytes: 3},
|
||||
{offset: 6, numBytes: -5},
|
||||
},
|
||||
sparseData: "abcde",
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
realSize: 35,
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: math.MaxInt64, numBytes: 3},
|
||||
{offset: 6, numBytes: -5},
|
||||
},
|
||||
sparseData: "abcde",
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
realSize: 10,
|
||||
sparseMap: []sparseEntry{
|
||||
{offset: 1, numBytes: 3},
|
||||
{offset: 2, numBytes: 2},
|
||||
},
|
||||
sparseData: "abcde",
|
||||
err: ErrHeader,
|
||||
}}
|
||||
|
||||
for i, v := range vectors {
|
||||
r := bytes.NewReader([]byte(v.sparseData))
|
||||
rfr := ®FileReader{r: r, nb: int64(len(v.sparseData))}
|
||||
|
||||
var sfr *sparseFileReader
|
||||
var err error
|
||||
var buf []byte
|
||||
|
||||
sfr, err = newSparseFileReader(rfr, v.sparseMap, v.realSize)
|
||||
if err != nil {
|
||||
t.Errorf("test %d: Unexpected error: %v", i, err)
|
||||
goto fail
|
||||
}
|
||||
if e := test.expected; !bytes.Equal(buf, e) {
|
||||
t.Errorf("test %d: Contents = %v, want %v", i, buf, e)
|
||||
if sfr.numBytes() != int64(len(v.sparseData)) {
|
||||
t.Errorf("test %d, numBytes() before reading: got %d, want %d", i, sfr.numBytes(), len(v.sparseData))
|
||||
}
|
||||
buf, err = ioutil.ReadAll(sfr)
|
||||
if err != nil {
|
||||
goto fail
|
||||
}
|
||||
if string(buf) != v.expected {
|
||||
t.Errorf("test %d, ReadAll(): got %q, want %q", i, string(buf), v.expected)
|
||||
}
|
||||
if sfr.numBytes() != 0 {
|
||||
t.Errorf("test %d: After draining the reader, numBytes() was nonzero", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSparseIncrementalRead(t *testing.T) {
|
||||
sparseMap := []sparseEntry{{10, 2}}
|
||||
sparseData := []byte("Go")
|
||||
expected := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Go\x00\x00\x00\x00\x00\x00\x00\x00"
|
||||
|
||||
r := bytes.NewReader(sparseData)
|
||||
nb := int64(r.Len())
|
||||
sfr := &sparseFileReader{
|
||||
rfr: ®FileReader{r: r, nb: nb},
|
||||
sp: sparseMap,
|
||||
pos: 0,
|
||||
tot: int64(len(expected)),
|
||||
t.Errorf("test %d, numBytes() after reading: got %d, want %d", i, sfr.numBytes(), 0)
|
||||
}
|
||||
|
||||
// We'll read the data 6 bytes at a time, with a hole of size 10 at
|
||||
// the beginning and one of size 8 at the end.
|
||||
var outputBuf bytes.Buffer
|
||||
buf := make([]byte, 6)
|
||||
for {
|
||||
n, err := sfr.Read(buf)
|
||||
if err == io.EOF {
|
||||
break
|
||||
fail:
|
||||
if err != v.err {
|
||||
t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err)
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("Read: unexpected error %v\n", err)
|
||||
}
|
||||
if n > 0 {
|
||||
_, err := outputBuf.Write(buf[:n])
|
||||
if err != nil {
|
||||
t.Errorf("Write: unexpected error %v\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
got := outputBuf.String()
|
||||
if got != expected {
|
||||
t.Errorf("Contents = %v, want %v", got, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadGNUSparseMap0x1(t *testing.T) {
|
||||
headers := map[string]string{
|
||||
const (
|
||||
maxUint = ^uint(0)
|
||||
maxInt = int(maxUint >> 1)
|
||||
)
|
||||
var (
|
||||
big1 = fmt.Sprintf("%d", int64(maxInt))
|
||||
big2 = fmt.Sprintf("%d", (int64(maxInt)/2)+1)
|
||||
big3 = fmt.Sprintf("%d", (int64(maxInt) / 3))
|
||||
)
|
||||
|
||||
var vectors = []struct {
|
||||
extHdrs map[string]string // Input data
|
||||
sparseMap []sparseEntry // Expected sparse entries to be outputted
|
||||
err error // Expected errors that may be raised
|
||||
}{{
|
||||
extHdrs: map[string]string{paxGNUSparseNumBlocks: "-4"},
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
extHdrs: map[string]string{paxGNUSparseNumBlocks: "fee "},
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
extHdrs: map[string]string{
|
||||
paxGNUSparseNumBlocks: big1,
|
||||
paxGNUSparseMap: "0,5,10,5,20,5,30,5",
|
||||
},
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
extHdrs: map[string]string{
|
||||
paxGNUSparseNumBlocks: big2,
|
||||
paxGNUSparseMap: "0,5,10,5,20,5,30,5",
|
||||
},
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
extHdrs: map[string]string{
|
||||
paxGNUSparseNumBlocks: big3,
|
||||
paxGNUSparseMap: "0,5,10,5,20,5,30,5",
|
||||
},
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
extHdrs: map[string]string{
|
||||
paxGNUSparseNumBlocks: "4",
|
||||
paxGNUSparseMap: "0.5,5,10,5,20,5,30,5",
|
||||
},
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
extHdrs: map[string]string{
|
||||
paxGNUSparseNumBlocks: "4",
|
||||
paxGNUSparseMap: "0,5.5,10,5,20,5,30,5",
|
||||
},
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
extHdrs: map[string]string{
|
||||
paxGNUSparseNumBlocks: "4",
|
||||
paxGNUSparseMap: "0,fewafewa.5,fewafw,5,20,5,30,5",
|
||||
},
|
||||
err: ErrHeader,
|
||||
}, {
|
||||
extHdrs: map[string]string{
|
||||
paxGNUSparseNumBlocks: "4",
|
||||
paxGNUSparseMap: "0,5,10,5,20,5,30,5",
|
||||
}
|
||||
expected := []sparseEntry{
|
||||
{offset: 0, numBytes: 5},
|
||||
{offset: 10, numBytes: 5},
|
||||
{offset: 20, numBytes: 5},
|
||||
{offset: 30, numBytes: 5},
|
||||
}
|
||||
},
|
||||
sparseMap: []sparseEntry{{0, 5}, {10, 5}, {20, 5}, {30, 5}},
|
||||
}}
|
||||
|
||||
sp, err := readGNUSparseMap0x1(headers)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
for i, v := range vectors {
|
||||
sp, err := readGNUSparseMap0x1(v.extHdrs)
|
||||
if !reflect.DeepEqual(sp, v.sparseMap) && !(len(sp) == 0 && len(v.sparseMap) == 0) {
|
||||
t.Errorf("test %d, readGNUSparseMap0x1(...): got %v, want %v", i, sp, v.sparseMap)
|
||||
}
|
||||
if err != v.err {
|
||||
t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err)
|
||||
}
|
||||
if !reflect.DeepEqual(sp, expected) {
|
||||
t.Errorf("Incorrect sparse map: got %v, wanted %v", sp, expected)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -746,53 +803,3 @@ func TestUninitializedRead(t *testing.T) {
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
// Negative header size should not cause panic.
|
||||
// Issues 10959 and 10960.
|
||||
func TestNegativeHdrSize(t *testing.T) {
|
||||
f, err := os.Open("testdata/neg-size.tar")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
r := NewReader(f)
|
||||
_, err = r.Next()
|
||||
if err != ErrHeader {
|
||||
t.Error("want ErrHeader, got", err)
|
||||
}
|
||||
io.Copy(ioutil.Discard, r)
|
||||
}
|
||||
|
||||
// This used to hang in (*sparseFileReader).readHole due to missing
|
||||
// verification of sparse offsets against file size.
|
||||
func TestIssue10968(t *testing.T) {
|
||||
f, err := os.Open("testdata/issue10968.tar")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
r := NewReader(f)
|
||||
_, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = io.Copy(ioutil.Discard, r)
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Fatalf("expected %q, got %q", io.ErrUnexpectedEOF, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Do not panic if there are errors in header blocks after the pax header.
|
||||
// Issue 11169
|
||||
func TestIssue11169(t *testing.T) {
|
||||
f, err := os.Open("testdata/issue11169.tar")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
r := NewReader(f)
|
||||
_, err = r.Next()
|
||||
if err == nil {
|
||||
t.Fatal("Unexpected success")
|
||||
}
|
||||
}
|
||||
|
|
|
BIN
src/archive/tar/testdata/issue12435.tar
vendored
Normal file
Binary file not shown.
@ -23,7 +23,6 @@ var (
	ErrWriteTooLong    = errors.New("archive/tar: write too long")
	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
	ErrWriteAfterClose = errors.New("archive/tar: write after close")
	errNameTooLong     = errors.New("archive/tar: name too long")
	errInvalidHeader   = errors.New("archive/tar: header field too long or contains invalid values")
)
@ -215,26 +214,14 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
	_, paxPathUsed := paxHeaders[paxPath]
	// try to use a ustar header when only the name is too long
	if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
		suffix := hdr.Name
		prefix := ""
		if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
			var err error
			prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
			if err == nil {
				// ok we can use a ustar long name instead of pax, now correct the fields

				// remove the path field from the pax header. this will suppress the pax header
		prefix, suffix, ok := splitUSTARPath(hdr.Name)
		if ok {
			// Since we can encode in USTAR format, disable PAX header.
			delete(paxHeaders, paxPath)

			// update the path fields
			// Update the path fields
			tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
			tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)

			// Use the ustar magic if we used ustar long names.
			if len(prefix) > 0 && !tw.usedBinary {
				copy(header[257:265], []byte("ustar\x00"))
			}
		}
	}
}
@ -270,28 +257,25 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
	return tw.err
}

// writeUSTARLongName splits a USTAR long name hdr.Name.
// name must be < 256 characters. errNameTooLong is returned
// if hdr.Name can't be split. The splitting heuristic
// is compatible with gnu tar.
func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
// If the path is not splittable, then it will return ("", "", false).
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
	length := len(name)
	if length > fileNamePrefixSize+1 {
	if length <= fileNameSize || !isASCII(name) {
		return "", "", false
	} else if length > fileNamePrefixSize+1 {
		length = fileNamePrefixSize + 1
	} else if name[length-1] == '/' {
		length--
	}

	i := strings.LastIndex(name[:length], "/")
	// nlen contains the resulting length in the name field.
	// plen contains the resulting length in the prefix field.
	nlen := len(name) - i - 1
	plen := i
	nlen := len(name) - i - 1 // nlen is length of suffix
	plen := i                 // plen is length of prefix
	if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
		err = errNameTooLong
		return
		return "", "", false
	}
	prefix, suffix = name[:i], name[i+1:]
	return
	return name[:i], name[i+1:], true
}

// writePaxHeader writes an extended pax header to the
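splitUSTARPath cuts an over-long name at a '/' so the tail fits the 100-byte USTAR name field and the head fits the 155-byte prefix field. A simplified, standalone sketch of that rule (split here is illustrative and omits the ASCII and trailing-slash handling of the real function):

package main

import (
	"fmt"
	"strings"
)

// USTAR field sizes: the name field holds at most 100 bytes and the
// prefix field at most 155 bytes.
const (
	nameSize   = 100
	prefixSize = 155
)

// split cuts path at a '/' so the suffix fits the name field and the
// prefix fits the prefix field; it reports false when no such cut exists.
func split(path string) (prefix, suffix string, ok bool) {
	if len(path) <= nameSize {
		return "", "", false // short names don't need splitting
	}
	length := len(path)
	if length > prefixSize+1 {
		length = prefixSize + 1
	}
	i := strings.LastIndex(path[:length], "/")
	if i <= 0 || len(path)-i-1 == 0 || len(path)-i-1 > nameSize || i > prefixSize {
		return "", "", false
	}
	return path[:i], path[i+1:], true
}

func main() {
	long := strings.Repeat("dir/", 30) + "file.txt" // 128 bytes, too long for the name field alone
	prefix, suffix, ok := split(long)
	fmt.Println(ok, prefix, suffix)
}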
@ -544,3 +544,37 @@ func TestWriteAfterClose(t *testing.T) {
|
|||
t.Fatalf("Write: got %v; want ErrWriteAfterClose", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplitUSTARPath(t *testing.T) {
|
||||
var sr = strings.Repeat
|
||||
|
||||
var vectors = []struct {
|
||||
input string // Input path
|
||||
prefix string // Expected output prefix
|
||||
suffix string // Expected output suffix
|
||||
ok bool // Split success?
|
||||
}{
|
||||
{"", "", "", false},
|
||||
{"abc", "", "", false},
|
||||
{"用戶名", "", "", false},
|
||||
{sr("a", fileNameSize), "", "", false},
|
||||
{sr("a", fileNameSize) + "/", "", "", false},
|
||||
{sr("a", fileNameSize) + "/a", sr("a", fileNameSize), "a", true},
|
||||
{sr("a", fileNamePrefixSize) + "/", "", "", false},
|
||||
{sr("a", fileNamePrefixSize) + "/a", sr("a", fileNamePrefixSize), "a", true},
|
||||
{sr("a", fileNameSize+1), "", "", false},
|
||||
{sr("/", fileNameSize+1), sr("/", fileNameSize-1), "/", true},
|
||||
{sr("a", fileNamePrefixSize) + "/" + sr("b", fileNameSize),
|
||||
sr("a", fileNamePrefixSize), sr("b", fileNameSize), true},
|
||||
{sr("a", fileNamePrefixSize) + "//" + sr("b", fileNameSize), "", "", false},
|
||||
{sr("a/", fileNameSize), sr("a/", 77) + "a", sr("a/", 22), true},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
prefix, suffix, ok := splitUSTARPath(v.input)
|
||||
if prefix != v.prefix || suffix != v.suffix || ok != v.ok {
|
||||
t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)",
|
||||
v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -376,6 +376,8 @@ func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error)
|
|||
}
|
||||
d.comment = string(b[:l])
|
||||
|
||||
// These values mean that the file can be a zip64 file
|
||||
if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
|
||||
p, err := findDirectory64End(r, directoryEndOffset)
|
||||
if err == nil && p >= 0 {
|
||||
err = readDirectory64End(r, p, d)
|
||||
|
|
@ -383,7 +385,7 @@ func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error)
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
// Make sure directoryOffset points to somewhere in our file.
|
||||
if o := int64(d.directoryOffset); o < 0 || o >= size {
|
||||
return nil, ErrFormat
|
||||
|
|
@ -407,8 +409,13 @@ func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error)
|
|||
if sig := b.uint32(); sig != directory64LocSignature {
|
||||
return -1, nil
|
||||
}
|
||||
b = b[4:] // skip number of the disk with the start of the zip64 end of central directory
|
||||
if b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory
|
||||
return -1, nil // the file is not a valid zip64-file
|
||||
}
|
||||
p := b.uint64() // relative offset of the zip64 end of central directory record
|
||||
if b.uint32() != 1 { // total number of disks
|
||||
return -1, nil // the file is not a valid zip64-file
|
||||
}
|
||||
return int64(p), nil
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -605,3 +605,40 @@ func TestIssue11146(t *testing.T) {
|
|||
}
|
||||
r.Close()
|
||||
}
|
||||
|
||||
// Verify we do not treat non-zip64 archives as zip64
|
||||
func TestIssue12449(t *testing.T) {
|
||||
data := []byte{
|
||||
0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
|
||||
0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x03, 0x00, 0x18, 0x00, 0xca, 0x64,
|
||||
0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05,
|
||||
0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
|
||||
0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,
|
||||
0x00, 0x31, 0x31, 0x31, 0x32, 0x32, 0x32, 0x0a,
|
||||
0x50, 0x4b, 0x07, 0x08, 0x1d, 0x88, 0x77, 0xb0,
|
||||
0x07, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00,
|
||||
0x50, 0x4b, 0x01, 0x02, 0x14, 0x03, 0x14, 0x00,
|
||||
0x08, 0x00, 0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46,
|
||||
0x1d, 0x88, 0x77, 0xb0, 0x07, 0x00, 0x00, 0x00,
|
||||
0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x18, 0x00,
|
||||
0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0xa0, 0x81, 0x00, 0x00, 0x00, 0x00, 0xca, 0x64,
|
||||
0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05,
|
||||
0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
|
||||
0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,
|
||||
0x00, 0x97, 0x2b, 0x49, 0x23, 0x05, 0xc5, 0x0b,
|
||||
0xa7, 0xd1, 0x52, 0xa2, 0x9c, 0x50, 0x4b, 0x06,
|
||||
0x07, 0xc8, 0x19, 0xc1, 0xaf, 0x94, 0x9c, 0x61,
|
||||
0x44, 0xbe, 0x94, 0x19, 0x42, 0x58, 0x12, 0xc6,
|
||||
0x5b, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00,
|
||||
0x00, 0x01, 0x00, 0x01, 0x00, 0x69, 0x00, 0x00,
|
||||
0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
}
|
||||
// Read in the archive.
|
||||
_, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data)))
|
||||
if err != nil {
|
||||
t.Errorf("Error reading the archive: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -80,3 +80,32 @@ func ExampleScanner_custom() {
|
|||
// 5678
|
||||
// Invalid input: strconv.ParseInt: parsing "1234567901234567890": value out of range
|
||||
}
|
||||
|
||||
// Use a Scanner with a custom split function to parse a comma-separated
|
||||
// list with an empty final value.
|
||||
func ExampleScanner_emptyFinalToken() {
|
||||
// Comma-separated list; last entry is empty.
|
||||
const input = "1,2,3,4,"
|
||||
scanner := bufio.NewScanner(strings.NewReader(input))
|
||||
// Define a split function that separates on commas.
|
||||
onComma := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
|
||||
for i := 0; i < len(data); i++ {
|
||||
if data[i] == ',' {
|
||||
return i + 1, data[:i], nil
|
||||
}
|
||||
}
|
||||
// There is one final token to be delivered, which may be the empty string.
|
||||
// Returning bufio.ErrFinalToken here tells Scan there are no more tokens after this
|
||||
// but does not trigger an error to be returned from Scan itself.
|
||||
return 0, data, bufio.ErrFinalToken
|
||||
}
|
||||
scanner.Split(onComma)
|
||||
// Scan.
|
||||
for scanner.Scan() {
|
||||
fmt.Printf("%q ", scanner.Text())
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
fmt.Fprintln(os.Stderr, "reading input:", err)
|
||||
}
|
||||
// Output: "1" "2" "3" "4" ""
|
||||
}
|
||||
|
|
|
|||
|
|
@ -37,6 +37,8 @@ type Scanner struct {
|
|||
end int // End of data in buf.
|
||||
err error // Sticky error.
|
||||
empties int // Count of successive empty tokens.
|
||||
scanCalled bool // Scan has been called; buffer is in use.
|
||||
done bool // Scan has finished.
|
||||
}
|
||||
|
||||
// SplitFunc is the signature of the split function used to tokenize the
|
||||
|
|
@ -65,10 +67,13 @@ var (
|
|||
)
|
||||
|
||||
const (
|
||||
// MaxScanTokenSize is the maximum size used to buffer a token.
|
||||
// MaxScanTokenSize is the maximum size used to buffer a token
|
||||
// unless the user provides an explicit buffer with Scan.Buffer.
|
||||
// The actual maximum token size may be smaller as the buffer
|
||||
// may need to include, for instance, a newline.
|
||||
MaxScanTokenSize = 64 * 1024
|
||||
|
||||
startBufSize = 4096 // Size of initial allocation for buffer.
|
||||
)
|
||||
|
||||
// NewScanner returns a new Scanner to read from r.
|
||||
|
|
@ -78,7 +83,6 @@ func NewScanner(r io.Reader) *Scanner {
|
|||
r: r,
|
||||
split: ScanLines,
|
||||
maxTokenSize: MaxScanTokenSize,
|
||||
buf: make([]byte, 4096), // Plausible starting size; needn't be large.
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -103,6 +107,16 @@ func (s *Scanner) Text() string {
|
|||
return string(s.token)
|
||||
}
|
||||
|
||||
// ErrFinalToken is a special sentinel error value. It is intended to be
|
||||
// returned by a Split function to indicate that the token being delivered
|
||||
// with the error is the last token and scanning should stop after this one.
|
||||
// After ErrFinalToken is received by Scan, scanning stops with no error.
|
||||
// The value is useful to stop processing early or when it is necessary to
|
||||
// deliver a final empty token. One could achieve the same behavior
|
||||
// with a custom error value but providing one here is tidier.
|
||||
// See the emptyFinalToken example for a use of this value.
|
||||
var ErrFinalToken = errors.New("final token")
|
||||
|
||||
// Scan advances the Scanner to the next token, which will then be
|
||||
// available through the Bytes or Text method. It returns false when the
|
||||
// scan stops, either by reaching the end of the input or an error.
|
||||
|
|
@ -112,6 +126,10 @@ func (s *Scanner) Text() string {
|
|||
// Scan panics if the split function returns 100 empty tokens without
|
||||
// advancing the input. This is a common error mode for scanners.
|
||||
func (s *Scanner) Scan() bool {
|
||||
if s.done {
|
||||
return false
|
||||
}
|
||||
s.scanCalled = true
|
||||
// Loop until we have a token.
|
||||
for {
|
||||
// See if we can get a token with what we already have.
|
||||
|
|
@ -120,6 +138,11 @@ func (s *Scanner) Scan() bool {
|
|||
if s.end > s.start || s.err != nil {
|
||||
advance, token, err := s.split(s.buf[s.start:s.end], s.err != nil)
|
||||
if err != nil {
|
||||
if err == ErrFinalToken {
|
||||
s.token = token
|
||||
s.done = true
|
||||
return true
|
||||
}
|
||||
s.setErr(err)
|
||||
return false
|
||||
}
|
||||
|
|
@ -158,11 +181,16 @@ func (s *Scanner) Scan() bool {
|
|||
}
|
||||
// Is the buffer full? If so, resize.
|
||||
if s.end == len(s.buf) {
|
||||
if len(s.buf) >= s.maxTokenSize {
|
||||
// Guarantee no overflow in the multiplication below.
|
||||
const maxInt = int(^uint(0) >> 1)
|
||||
if len(s.buf) >= s.maxTokenSize || len(s.buf) > maxInt/2 {
|
||||
s.setErr(ErrTooLong)
|
||||
return false
|
||||
}
|
||||
newSize := len(s.buf) * 2
|
||||
if newSize == 0 {
|
||||
newSize = startBufSize
|
||||
}
|
||||
if newSize > s.maxTokenSize {
|
||||
newSize = s.maxTokenSize
|
||||
}
|
||||
|
|
@ -217,9 +245,31 @@ func (s *Scanner) setErr(err error) {
	}
}

// Split sets the split function for the Scanner. If called, it must be
// called before Scan. The default split function is ScanLines.
// Buffer sets the initial buffer to use when scanning and the maximum
// size of buffer that may be allocated during scanning. The maximum
// token size is the larger of max and cap(buf). If max <= cap(buf),
// Scan will use this buffer only and do no allocation.
//
// By default, Scan uses an internal buffer and sets the
// maximum token size to MaxScanTokenSize.
//
// Buffer panics if it is called after scanning has started.
func (s *Scanner) Buffer(buf []byte, max int) {
	if s.scanCalled {
		panic("Buffer called after Scan")
	}
	s.buf = buf[0:cap(buf)]
	s.maxTokenSize = max
}

// Split sets the split function for the Scanner.
// The default split function is ScanLines.
//
// Split panics if it is called after scanning has started.
func (s *Scanner) Split(split SplitFunc) {
	if s.scanCalled {
		panic("Split called after Scan")
	}
	s.split = split
}
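A small usage sketch of the new Buffer method introduced above: it must be called before the first Scan, and here it raises the token cap above the 64 KiB default so a single long line can be scanned (the buffer sizes are arbitrary):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// A single 128 KiB token would exceed the default MaxScanTokenSize,
	// so give the Scanner a larger cap before the first call to Scan.
	huge := strings.Repeat("x", 128*1024)
	s := bufio.NewScanner(strings.NewReader(huge + "\n"))
	s.Buffer(make([]byte, 0, 64*1024), 256*1024)
	for s.Scan() {
		fmt.Println("token length:", len(s.Text()))
	}
	if err := s.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}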
@ -429,33 +429,37 @@ func commaSplit(data []byte, atEOF bool) (advance int, token []byte, err error)
|
|||
return i + 1, data[:i], nil
|
||||
}
|
||||
}
|
||||
if !atEOF {
|
||||
return 0, nil, nil
|
||||
}
|
||||
return 0, data, nil
|
||||
return 0, data, ErrFinalToken
|
||||
}
|
||||
|
||||
func TestEmptyTokens(t *testing.T) {
|
||||
s := NewScanner(strings.NewReader("1,2,3,"))
|
||||
values := []string{"1", "2", "3", ""}
|
||||
func testEmptyTokens(t *testing.T, text string, values []string) {
|
||||
s := NewScanner(strings.NewReader(text))
|
||||
s.Split(commaSplit)
|
||||
var i int
|
||||
for i = 0; i < len(values); i++ {
|
||||
if !s.Scan() {
|
||||
break
|
||||
for i = 0; s.Scan(); i++ {
|
||||
if i >= len(values) {
|
||||
t.Fatalf("got %d fields, expected %d", i+1, len(values))
|
||||
}
|
||||
if s.Text() != values[i] {
|
||||
t.Errorf("%d: expected %q got %q", i, values[i], s.Text())
|
||||
}
|
||||
}
|
||||
if i != len(values) {
|
||||
t.Errorf("got %d fields, expected %d", i, len(values))
|
||||
t.Fatalf("got %d fields, expected %d", i, len(values))
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyTokens(t *testing.T) {
|
||||
testEmptyTokens(t, "1,2,3,", []string{"1", "2", "3", ""})
|
||||
}
|
||||
|
||||
func TestWithNoEmptyTokens(t *testing.T) {
|
||||
testEmptyTokens(t, "1,2,3", []string{"1", "2", "3"})
|
||||
}
|
||||
|
||||
func loopAtEOFSplit(data []byte, atEOF bool) (advance int, token []byte, err error) {
|
||||
if len(data) > 0 {
|
||||
return 1, data[:1], nil
|
||||
|
|
@ -522,3 +526,19 @@ func TestEmptyLinesOK(t *testing.T) {
|
|||
t.Fatalf("stopped with %d left to process", c)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure we can read a huge token if a big enough buffer is provided.
|
||||
func TestHugeBuffer(t *testing.T) {
|
||||
text := strings.Repeat("x", 2*MaxScanTokenSize)
|
||||
s := NewScanner(strings.NewReader(text + "\n"))
|
||||
s.Buffer(make([]byte, 100), 3*MaxScanTokenSize)
|
||||
for s.Scan() {
|
||||
token := s.Text()
|
||||
if token != text {
|
||||
t.Errorf("scan got incorrect token of length %d", len(token))
|
||||
}
|
||||
}
|
||||
if s.Err() != nil {
|
||||
t.Fatal("after scan:", s.Err())
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -428,10 +428,15 @@ func (w *Walker) Import(name string) (*types.Package, error) {
|
|||
}
|
||||
w.imported[name] = &importing
|
||||
|
||||
root := w.root
|
||||
if strings.HasPrefix(name, "golang.org/x/") {
|
||||
root = filepath.Join(root, "vendor")
|
||||
}
|
||||
|
||||
// Determine package files.
|
||||
dir := filepath.Join(w.root, filepath.FromSlash(name))
|
||||
dir := filepath.Join(root, filepath.FromSlash(name))
|
||||
if fi, err := os.Stat(dir); err != nil || !fi.IsDir() {
|
||||
log.Fatalf("no source in tree for package %q", pkg)
|
||||
log.Fatalf("no source in tree for import %q: %v", name, err)
|
||||
}
|
||||
|
||||
context := w.context
|
||||
|
|
|
|||
|
|
@ -252,7 +252,9 @@ func archArm64() *Arch {
|
|||
register["EQ"] = arm64.COND_EQ
|
||||
register["NE"] = arm64.COND_NE
|
||||
register["HS"] = arm64.COND_HS
|
||||
register["CS"] = arm64.COND_HS
|
||||
register["LO"] = arm64.COND_LO
|
||||
register["CC"] = arm64.COND_LO
|
||||
register["MI"] = arm64.COND_MI
|
||||
register["PL"] = arm64.COND_PL
|
||||
register["VS"] = arm64.COND_VS
|
||||
|
|
|
|||
|
|
@ -27,15 +27,18 @@ func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) {
|
|||
case '5':
|
||||
if !arch.ARMConditionCodes(prog, cond) {
|
||||
p.errorf("unrecognized condition code .%q", cond)
|
||||
return
|
||||
}
|
||||
|
||||
case '7':
|
||||
if !arch.ARM64Suffix(prog, cond) {
|
||||
p.errorf("unrecognized suffix .%q", cond)
|
||||
return
|
||||
}
|
||||
|
||||
default:
|
||||
p.errorf("unrecognized suffix .%q", cond)
|
||||
return
|
||||
}
|
||||
}
|
||||
if p.firstProg == nil {
|
||||
|
|
@ -49,6 +52,7 @@ func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) {
|
|||
for _, label := range p.pendingLabels {
|
||||
if p.labels[label] != nil {
|
||||
p.errorf("label %q multiply defined", label)
|
||||
return
|
||||
}
|
||||
p.labels[label] = prog
|
||||
}
|
||||
|
|
@@ -63,14 +67,17 @@ func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) {
 	}
 }
 
-// validateSymbol checks that addr represents a valid name for a pseudo-op.
-func (p *Parser) validateSymbol(pseudo string, addr *obj.Addr, offsetOk bool) {
+// validSymbol checks that addr represents a valid name for a pseudo-op.
+func (p *Parser) validSymbol(pseudo string, addr *obj.Addr, offsetOk bool) bool {
 	if addr.Name != obj.NAME_EXTERN && addr.Name != obj.NAME_STATIC || addr.Scale != 0 || addr.Reg != 0 {
 		p.errorf("%s symbol %q must be a symbol(SB)", pseudo, symbolName(addr))
+		return false
 	}
 	if !offsetOk && addr.Offset != 0 {
 		p.errorf("%s symbol %q must not be offset from SB", pseudo, symbolName(addr))
+		return false
 	}
+	return true
 }
 
 // evalInteger evaluates an integer constant for a pseudo-op.

@@ -79,11 +86,13 @@ func (p *Parser) evalInteger(pseudo string, operands []lex.Token) int64 {
 	return p.getConstantPseudo(pseudo, &addr)
 }
 
-// validateImmediate checks that addr represents an immediate constant.
-func (p *Parser) validateImmediate(pseudo string, addr *obj.Addr) {
+// validImmediate checks that addr represents an immediate constant.
+func (p *Parser) validImmediate(pseudo string, addr *obj.Addr) bool {
 	if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
 		p.errorf("%s: expected immediate constant; found %s", pseudo, obj.Dconv(&emptyProg, addr))
+		return false
 	}
+	return true
 }
 
 // asmText assembles a TEXT pseudo-op.

@@ -102,7 +111,9 @@ func (p *Parser) asmText(word string, operands [][]lex.Token) {
 	// Operand 0 is the symbol name in the form foo(SB).
 	// That means symbol plus indirect on SB and no offset.
 	nameAddr := p.address(operands[0])
-	p.validateSymbol("TEXT", &nameAddr, false)
+	if !p.validSymbol("TEXT", &nameAddr, false) {
+		return
+	}
 	name := symbolName(&nameAddr)
 	next := 1
 
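These hunks convert fatal validation into boolean-returning validation so a bad pseudo-op is reported and skipped rather than aborting the whole assembly. A minimal, generic sketch of that pattern, with all names invented for illustration:

package main

import "fmt"

type parser struct{ errs []string }

func (p *parser) errorf(format string, args ...interface{}) {
	p.errs = append(p.errs, fmt.Sprintf(format, args...))
}

// validImmediate reports a diagnostic and returns false instead of exiting,
// so the caller can bail out of the current op and keep parsing.
func (p *parser) validImmediate(pseudo string, v int) bool {
	if v < 0 {
		p.errorf("%s: expected non-negative immediate; found %d", pseudo, v)
		return false
	}
	return true
}

func (p *parser) asmGlobl(size int) {
	if !p.validImmediate("GLOBL", size) {
		return // skip this op but continue with the rest of the input
	}
	fmt.Println("GLOBL size", size)
}

func main() {
	p := &parser{}
	p.asmGlobl(-1)
	p.asmGlobl(8)
	fmt.Println(p.errs)
}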
@ -144,6 +155,7 @@ func (p *Parser) asmText(word string, operands [][]lex.Token) {
|
|||
// There is an argument size. It must be a minus sign followed by a non-negative integer literal.
|
||||
if len(op) != 2 || op[0].ScanToken != '-' || op[1].ScanToken != scanner.Int {
|
||||
p.errorf("TEXT %s: argument size must be of form -integer", name)
|
||||
return
|
||||
}
|
||||
argSize = p.positiveAtoi(op[1].String())
|
||||
}
|
||||
|
|
@ -185,7 +197,9 @@ func (p *Parser) asmData(word string, operands [][]lex.Token) {
|
|||
scale := p.parseScale(op[n-1].String())
|
||||
op = op[:n-2]
|
||||
nameAddr := p.address(op)
|
||||
p.validateSymbol("DATA", &nameAddr, true)
|
||||
if !p.validSymbol("DATA", &nameAddr, true) {
|
||||
return
|
||||
}
|
||||
name := symbolName(&nameAddr)
|
||||
|
||||
// Operand 1 is an immediate constant or address.
|
||||
|
|
@ -195,11 +209,13 @@ func (p *Parser) asmData(word string, operands [][]lex.Token) {
|
|||
// OK
|
||||
default:
|
||||
p.errorf("DATA value must be an immediate constant or address")
|
||||
return
|
||||
}
|
||||
|
||||
// The addresses must not overlap. Easiest test: require monotonicity.
|
||||
if lastAddr, ok := p.dataAddr[name]; ok && nameAddr.Offset < lastAddr {
|
||||
p.errorf("overlapping DATA entry for %s", name)
|
||||
return
|
||||
}
|
||||
p.dataAddr[name] = nameAddr.Offset + int64(scale)
|
||||
|
||||
|
|
@ -228,7 +244,9 @@ func (p *Parser) asmGlobl(word string, operands [][]lex.Token) {
|
|||
|
||||
// Operand 0 has the general form foo<>+0x04(SB).
|
||||
nameAddr := p.address(operands[0])
|
||||
p.validateSymbol("GLOBL", &nameAddr, false)
|
||||
if !p.validSymbol("GLOBL", &nameAddr, false) {
|
||||
return
|
||||
}
|
||||
next := 1
|
||||
|
||||
// Next operand is the optional flag, a literal integer.
|
||||
|
|
@ -240,7 +258,9 @@ func (p *Parser) asmGlobl(word string, operands [][]lex.Token) {
|
|||
|
||||
// Final operand is an immediate constant.
|
||||
addr := p.address(operands[next])
|
||||
p.validateImmediate("GLOBL", &addr)
|
||||
if !p.validImmediate("GLOBL", &addr) {
|
||||
return
|
||||
}
|
||||
|
||||
// log.Printf("GLOBL %s %d, $%d", name, flag, size)
|
||||
prog := &obj.Prog{
|
||||
|
|
@ -266,11 +286,15 @@ func (p *Parser) asmPCData(word string, operands [][]lex.Token) {
|
|||
|
||||
// Operand 0 must be an immediate constant.
|
||||
key := p.address(operands[0])
|
||||
p.validateImmediate("PCDATA", &key)
|
||||
if !p.validImmediate("PCDATA", &key) {
|
||||
return
|
||||
}
|
||||
|
||||
// Operand 1 must be an immediate constant.
|
||||
value := p.address(operands[1])
|
||||
p.validateImmediate("PCDATA", &value)
|
||||
if !p.validImmediate("PCDATA", &value) {
|
||||
return
|
||||
}
|
||||
|
||||
// log.Printf("PCDATA $%d, $%d", key.Offset, value.Offset)
|
||||
prog := &obj.Prog{
|
||||
|
|
@ -293,11 +317,15 @@ func (p *Parser) asmFuncData(word string, operands [][]lex.Token) {
|
|||
|
||||
// Operand 0 must be an immediate constant.
|
||||
valueAddr := p.address(operands[0])
|
||||
p.validateImmediate("FUNCDATA", &valueAddr)
|
||||
if !p.validImmediate("FUNCDATA", &valueAddr) {
|
||||
return
|
||||
}
|
||||
|
||||
// Operand 1 is a symbol name in the form foo(SB).
|
||||
nameAddr := p.address(operands[1])
|
||||
p.validateSymbol("FUNCDATA", &nameAddr, true)
|
||||
if !p.validSymbol("FUNCDATA", &nameAddr, true) {
|
||||
return
|
||||
}
|
||||
|
||||
prog := &obj.Prog{
|
||||
Ctxt: p.ctxt,
|
||||
|
|
@ -340,6 +368,7 @@ func (p *Parser) asmJump(op int, cond string, a []obj.Addr) {
|
|||
reg, ok := p.arch.RegisterNumber("R", int16(reg))
|
||||
if !ok {
|
||||
p.errorf("bad register number %d", reg)
|
||||
return
|
||||
}
|
||||
prog.Reg = reg
|
||||
break
|
||||
|
|
@ -390,6 +419,7 @@ func (p *Parser) asmJump(op int, cond string, a []obj.Addr) {
|
|||
prog.To = a[0]
|
||||
default:
|
||||
p.errorf("cannot assemble jump %+v", target)
|
||||
return
|
||||
}
|
||||
|
||||
p.append(prog, cond, true)
|
||||
|
|
@ -400,9 +430,9 @@ func (p *Parser) patch() {
|
|||
targetProg := p.labels[patch.label]
|
||||
if targetProg == nil {
|
||||
p.errorf("undefined label %s", patch.label)
|
||||
} else {
|
||||
p.branch(patch.prog, targetProg)
|
||||
return
|
||||
}
|
||||
p.branch(patch.prog, targetProg)
|
||||
}
|
||||
p.toPatch = p.toPatch[:0]
|
||||
}
|
||||
|
|
@ -468,6 +498,7 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
|
|||
break
|
||||
}
|
||||
p.errorf("unrecognized addressing for %s", obj.Aconv(op))
|
||||
return
|
||||
}
|
||||
if arch.IsARMFloatCmp(op) {
|
||||
prog.From = a[0]
|
||||
|
|
@ -506,6 +537,7 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
|
|||
prog.To = a[1]
|
||||
if a[2].Type != obj.TYPE_REG {
|
||||
p.errorf("invalid addressing modes for third operand to %s instruction, must be register", obj.Aconv(op))
|
||||
return
|
||||
}
|
||||
prog.RegTo2 = a[2].Reg
|
||||
break
|
||||
|
|
@ -541,9 +573,11 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
|
|||
prog.To = a[2]
|
||||
default:
|
||||
p.errorf("invalid addressing modes for %s instruction", obj.Aconv(op))
|
||||
return
|
||||
}
|
||||
default:
|
||||
p.errorf("TODO: implement three-operand instructions for this architecture")
|
||||
return
|
||||
}
|
||||
case 4:
|
||||
if p.arch.Thechar == '5' && arch.IsARMMULA(op) {
|
||||
|
|
@ -577,6 +611,7 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
|
|||
break
|
||||
}
|
||||
p.errorf("can't handle %s instruction with 4 operands", obj.Aconv(op))
|
||||
return
|
||||
case 5:
|
||||
if p.arch.Thechar == '9' && arch.IsPPC64RLD(op) {
|
||||
// Always reg, reg, con, con, reg. (con, con is a 'mask').
|
||||
|
|
@ -598,6 +633,7 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
|
|||
break
|
||||
}
|
||||
p.errorf("can't handle %s instruction with 5 operands", obj.Aconv(op))
|
||||
return
|
||||
case 6:
|
||||
if p.arch.Thechar == '5' && arch.IsARMMRC(op) {
|
||||
// Strange special case: MCR, MRC.
|
||||
|
|
@ -621,6 +657,7 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
|
|||
fallthrough
|
||||
default:
|
||||
p.errorf("can't handle %s instruction with %d operands", obj.Aconv(op), len(a))
|
||||
return
|
||||
}
|
||||
|
||||
p.append(prog, cond, true)
|
||||
|
|
|
|||
|
|
@ -293,6 +293,7 @@ var armOperandTests = []operandTest{
|
|||
{"[R0,R1,g,R15", ""}, // Issue 11764 - asm hung parsing ']' missing register lists.
|
||||
{"[):[o-FP", ""}, // Issue 12469 - there was no infinite loop for ARM; these are just sanity checks.
|
||||
{"[):[R0-FP", ""},
|
||||
{"(", ""}, // Issue 12466 - backed up before beginning of line.
|
||||
}
|
||||
|
||||
var ppc64OperandTests = []operandTest{
|
||||
|
|
|
|||
|
|
@ -38,6 +38,7 @@ type Parser struct {
|
|||
firstProg *obj.Prog
|
||||
lastProg *obj.Prog
|
||||
dataAddr map[string]int64 // Most recent address for DATA for this symbol.
|
||||
isJump bool // Instruction being assembled is a jump.
|
||||
errorWriter io.Writer
|
||||
}
|
||||
|
||||
|
|
@ -155,6 +156,7 @@ func (p *Parser) line() bool {
|
|||
// Remember this location so we can swap the operands below.
|
||||
if colon >= 0 {
|
||||
p.errorf("invalid ':' in operand")
|
||||
return true
|
||||
}
|
||||
colon = len(operands)
|
||||
}
|
||||
|
|
@ -196,15 +198,15 @@ func (p *Parser) line() bool {
|
|||
|
||||
func (p *Parser) instruction(op int, word, cond string, operands [][]lex.Token) {
|
||||
p.addr = p.addr[0:0]
|
||||
isJump := p.arch.IsJump(word)
|
||||
p.isJump = p.arch.IsJump(word)
|
||||
for _, op := range operands {
|
||||
addr := p.address(op)
|
||||
if !isJump && addr.Reg < 0 { // Jumps refer to PC, a pseudo.
|
||||
if !p.isJump && addr.Reg < 0 { // Jumps refer to PC, a pseudo.
|
||||
p.errorf("illegal use of pseudo-register in %s", word)
|
||||
}
|
||||
p.addr = append(p.addr, addr)
|
||||
}
|
||||
if isJump {
|
||||
if p.isJump {
|
||||
p.asmJump(op, cond, p.addr)
|
||||
return
|
||||
}
|
||||
|
|
@ -338,8 +340,13 @@ func (p *Parser) operand(a *obj.Addr) bool {
|
|||
case scanner.Int, scanner.Float, scanner.String, scanner.Char, '+', '-', '~':
|
||||
haveConstant = true
|
||||
case '(':
|
||||
// Could be parenthesized expression or (R).
|
||||
rname := p.next().String()
|
||||
// Could be parenthesized expression or (R). Must be something, though.
|
||||
tok := p.next()
|
||||
if tok.ScanToken == scanner.EOF {
|
||||
p.errorf("missing right parenthesis")
|
||||
return false
|
||||
}
|
||||
rname := tok.String()
|
||||
p.back()
|
||||
haveConstant = !p.atStartOfRegister(rname)
|
||||
if !haveConstant {
|
||||
|
|
@ -361,6 +368,7 @@ func (p *Parser) operand(a *obj.Addr) bool {
|
|||
if p.have(scanner.String) {
|
||||
if prefix != '$' {
|
||||
p.errorf("string constant must be an immediate")
|
||||
return false
|
||||
}
|
||||
str, err := strconv.Unquote(p.get(scanner.String).String())
|
||||
if err != nil {
|
||||
|
|
@ -568,12 +576,14 @@ func (p *Parser) symbolReference(a *obj.Addr, name string, prefix rune) {
|
|||
}
|
||||
a.Sym = obj.Linklookup(p.ctxt, name, isStatic)
|
||||
if p.peek() == scanner.EOF {
|
||||
if prefix != 0 {
|
||||
p.errorf("illegal addressing mode for symbol %s", name)
|
||||
}
|
||||
if prefix == 0 && p.isJump {
|
||||
// Symbols without prefix or suffix are jump labels.
|
||||
return
|
||||
}
|
||||
// Expect (SB) or (FP), (PC), (SB), or (SP)
|
||||
p.errorf("illegal or missing addressing mode for symbol %s", name)
|
||||
return
|
||||
}
|
||||
// Expect (SB), (FP), (PC), or (SP)
|
||||
p.get('(')
|
||||
reg := p.get(scanner.Ident).String()
|
||||
p.get(')')
|
||||
|
|
@ -952,7 +962,11 @@ func (p *Parser) next() lex.Token {
|
|||
}
|
||||
|
||||
func (p *Parser) back() {
|
||||
if p.inputPos == 0 {
|
||||
p.errorf("internal error: backing up before BOL")
|
||||
} else {
|
||||
p.inputPos--
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Parser) peek() lex.ScanToken {
|
||||
|
|
|
|||
|
|
@ -35,6 +35,8 @@ func TestErroneous(t *testing.T) {
|
|||
{"TEXT", "%", "expect two or three operands for TEXT"},
|
||||
{"TEXT", "1, 1", "TEXT symbol \"<erroneous symbol>\" must be a symbol(SB)"},
|
||||
{"TEXT", "$\"foo\", 0, $1", "TEXT symbol \"<erroneous symbol>\" must be a symbol(SB)"},
|
||||
{"TEXT", "$0É:0, 0, $1", "expected EOF, found É"}, // Issue #12467.
|
||||
{"TEXT", "$:0:(SB, 0, $1", "expected '(', found 0"}, // Issue 12468.
|
||||
{"FUNCDATA", "", "expect two operands for FUNCDATA"},
|
||||
{"FUNCDATA", "(SB ", "expect two operands for FUNCDATA"},
|
||||
{"DATA", "", "expect two operands for DATA"},
|
src/cmd/asm/internal/asm/testdata/arm64.out (vendored)

@@ -37,7 +37,7 @@
 147 00037 (testdata/arm64.s:147) CSEL LT, R1, R2, ZR
 148 00038 (testdata/arm64.s:148) CSINC GT, R1, ZR, R3
 149 00039 (testdata/arm64.s:149) CSNEG MI, R1, R2, R3
-150 00040 (testdata/arm64.s:150) CSINV 0, R1, R2, R3
+150 00040 (testdata/arm64.s:150) CSINV HS, R1, R2, R3
 156 00041 (testdata/arm64.s:156) CSEL LT, R1, R2
 164 00042 (testdata/arm64.s:164) CCMN MI, ZR, R1, $4
 173 00043 (testdata/arm64.s:173) FADDD $(0.5), F1
@@ -63,7 +63,12 @@ func predefine(defines flags.MultiFlag) map[string]*Macro {
 	return macros
 }
 
+var panicOnError bool // For testing.
+
 func (in *Input) Error(args ...interface{}) {
+	if panicOnError {
+		panic(fmt.Errorf("%s:%d: %s", in.File(), in.Line(), fmt.Sprintln(args...)))
+	}
 	fmt.Fprintf(os.Stderr, "%s:%d: %s", in.File(), in.Line(), fmt.Sprintln(args...))
 	os.Exit(1)
 }
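The panicOnError hook above lets tests observe the first fatal diagnostic instead of having the process exit. A hedged, self-contained sketch of that panic/recover pattern (all names here are invented for illustration):

package main

import (
	"errors"
	"fmt"
)

var panicOnError bool // set by tests so fatal() panics instead of exiting

func fatal(msg string) {
	if panicOnError {
		panic(errors.New(msg))
	}
	fmt.Println("fatal:", msg) // a real tool would write to stderr and exit
}

// firstError runs f and returns the first fatal diagnostic it raised, if any.
func firstError(f func()) (err error) {
	panicOnError = true
	defer func() {
		panicOnError = false
		switch e := recover().(type) {
		case nil:
		case error:
			err = e
		default:
			panic(e)
		}
	}()
	f()
	return
}

func main() {
	fmt.Println(firstError(func() { fatal("unclosed #ifdef") }))
}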
@ -113,6 +118,10 @@ func (in *Input) Next() ScanToken {
|
|||
}
|
||||
fallthrough
|
||||
default:
|
||||
if tok == scanner.EOF && len(in.ifdefStack) > 0 {
|
||||
// We're skipping text but have run out of input with no #endif.
|
||||
in.Error("unclosed #ifdef or #ifndef")
|
||||
}
|
||||
in.beginningOfLine = tok == '\n'
|
||||
if in.enabled() {
|
||||
in.text = in.Stack.Text()
|
||||
|
|
@ -251,6 +260,9 @@ func (in *Input) macroDefinition(name string) ([]string, []Token) {
|
|||
var tokens []Token
|
||||
// Scan to newline. Backslashes escape newlines.
|
||||
for tok != '\n' {
|
||||
if tok == scanner.EOF {
|
||||
in.Error("missing newline in macro definition for %q\n", name)
|
||||
}
|
||||
if tok == '\\' {
|
||||
tok = in.Stack.Next()
|
||||
if tok != '\n' && tok != '\\' {
|
||||
|
|
|
|||
|
|
@ -226,6 +226,35 @@ var lexTests = []lexTest{
|
|||
),
|
||||
"C.\n",
|
||||
},
|
||||
{
|
||||
"nested #define",
|
||||
lines(
|
||||
"#define A #define B THIS",
|
||||
"A",
|
||||
"B",
|
||||
),
|
||||
"THIS.\n",
|
||||
},
|
||||
{
|
||||
"nested #define with args",
|
||||
lines(
|
||||
"#define A #define B(x) x",
|
||||
"A",
|
||||
"B(THIS)",
|
||||
),
|
||||
"THIS.\n",
|
||||
},
|
||||
/* This one fails. See comment in Slice.Col.
|
||||
{
|
||||
"nested #define with args",
|
||||
lines(
|
||||
"#define A #define B (x) x",
|
||||
"A",
|
||||
"B(THIS)",
|
||||
),
|
||||
"x.\n",
|
||||
},
|
||||
*/
|
||||
}
|
||||
|
||||
func TestLex(t *testing.T) {
|
||||
|
|
@ -258,3 +287,76 @@ func drain(input *Input) string {
|
|||
buf.WriteString(input.Text())
|
||||
}
|
||||
}
|
||||
|
||||
type badLexTest struct {
|
||||
input string
|
||||
error string
|
||||
}
|
||||
|
||||
var badLexTests = []badLexTest{
|
||||
{
|
||||
"3 #define foo bar\n",
|
||||
"'#' must be first item on line",
|
||||
},
|
||||
{
|
||||
"#ifdef foo\nhello",
|
||||
"unclosed #ifdef or #ifndef",
|
||||
},
|
||||
{
|
||||
"#ifndef foo\nhello",
|
||||
"unclosed #ifdef or #ifndef",
|
||||
},
|
||||
{
|
||||
"#ifdef foo\nhello\n#else\nbye",
|
||||
"unclosed #ifdef or #ifndef",
|
||||
},
|
||||
{
|
||||
"#define A() A()\nA()",
|
||||
"recursive macro invocation",
|
||||
},
|
||||
{
|
||||
"#define A a\n#define A a\n",
|
||||
"redefinition of macro",
|
||||
},
|
||||
{
|
||||
"#define A a",
|
||||
"no newline after macro definition",
|
||||
},
|
||||
}
|
||||
|
||||
func TestBadLex(t *testing.T) {
|
||||
for _, test := range badLexTests {
|
||||
input := NewInput(test.error)
|
||||
input.Push(NewTokenizer(test.error, strings.NewReader(test.input), nil))
|
||||
err := firstError(input)
|
||||
if err == nil {
|
||||
t.Errorf("%s: got no error", test.error)
|
||||
continue
|
||||
}
|
||||
if !strings.Contains(err.Error(), test.error) {
|
||||
t.Errorf("got error %q expected %q", err.Error(), test.error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// firstError returns the first error value triggered by the input.
|
||||
func firstError(input *Input) (err error) {
|
||||
panicOnError = true
|
||||
defer func() {
|
||||
panicOnError = false
|
||||
switch e := recover(); e := e.(type) {
|
||||
case nil:
|
||||
case error:
|
||||
err = e
|
||||
default:
|
||||
panic(e)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
tok := input.Next()
|
||||
if tok == scanner.EOF {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -44,8 +44,16 @@ func (s *Slice) Line() int {
 }
 
 func (s *Slice) Col() int {
-	// Col is only called when defining a macro, which can't reach here.
-	panic("cannot happen: slice col")
+	// TODO: Col is only called when defining a macro and all it cares about is increasing
+	// position to discover whether there is a blank before the parenthesis.
+	// We only get here if defining a macro inside a macro.
+	// This imperfect implementation means we cannot tell the difference between
+	//	#define A #define B(x) x
+	// and
+	//	#define A #define B (x) x
+	// The first has definition of B has an argument, the second doesn't. Because we let
+	// text/scanner strip the blanks for us, this is extremely rare, hard to fix, and not worth it.
+	return s.pos
 }
 
 func (s *Slice) SetPos(line int, file string) {
@@ -391,17 +391,13 @@ the translation process.
 
 Translating Go
 
-[The rest of this comment refers to 6g, the Go compiler that is part
-of the amd64 port of the gc Go toolchain. Everything here applies to
-another architecture's compilers as well.]
-
 Given the input Go files x.go and y.go, cgo generates these source
 files:
 
-	x.cgo1.go       # for 6g
-	y.cgo1.go       # for 6g
-	_cgo_gotypes.go # for 6g
-	_cgo_import.go  # for 6g (if -dynout _cgo_import.go)
+	x.cgo1.go       # for gc (cmd/compile)
+	y.cgo1.go       # for gc
+	_cgo_gotypes.go # for gc
+	_cgo_import.go  # for gc (if -dynout _cgo_import.go)
 	x.cgo2.c        # for gcc
 	y.cgo2.c        # for gcc
 	_cgo_defun.c    # for gcc (if -gccgo)
@@ -464,7 +460,7 @@ Linking
 
 Once the _cgo_export.c and *.cgo2.c files have been compiled with gcc,
 they need to be linked into the final binary, along with the libraries
-they might depend on (in the case of puts, stdio). 6l has been
+they might depend on (in the case of puts, stdio). cmd/link has been
 extended to understand basic ELF files, but it does not understand ELF
 in the full complexity that modern C libraries embrace, so it cannot
 in general generate direct references to the system libraries.
@@ -495,23 +491,23 @@ _cgo_import.go, which looks like:
 //go:cgo_import_dynamic _ _ "libc.so.6"
 
 In the end, the compiled Go package, which will eventually be
-presented to 6l as part of a larger program, contains:
+presented to cmd/link as part of a larger program, contains:
 
-	_go_.6  # 6g-compiled object for _cgo_gotypes.go, _cgo_import.go, *.cgo1.go
+	_go_.o  # gc-compiled object for _cgo_gotypes.go, _cgo_import.go, *.cgo1.go
 	_all.o  # gcc-compiled object for _cgo_export.c, *.cgo2.c
 
-The final program will be a dynamic executable, so that 6l can avoid
+The final program will be a dynamic executable, so that cmd/link can avoid
 needing to process arbitrary .o files. It only needs to process the .o
 files generated from C files that cgo writes, and those are much more
 limited in the ELF or other features that they use.
 
-In essence, the _cgo_import.6 file includes the extra linking
-directives that 6l is not sophisticated enough to derive from _all.o
+In essence, the _cgo_import.o file includes the extra linking
+directives that cmd/link is not sophisticated enough to derive from _all.o
 on its own. Similarly, the _all.o uses dynamic references to real
-system object code because 6l is not sophisticated enough to process
+system object code because cmd/link is not sophisticated enough to process
 the real code.
 
-The main benefits of this system are that 6l remains relatively simple
+The main benefits of this system are that cmd/link remains relatively simple
 (it does not need to implement a complete ELF and Mach-O linker) and
 that gcc is not needed after the package is compiled. For example,
 package net uses cgo for access to name resolution functions provided
@@ -540,17 +536,17 @@ system calls.
 
 Internal and External Linking
 
-The text above describes "internal" linking, in which 6l parses and
+The text above describes "internal" linking, in which cmd/link parses and
 links host object files (ELF, Mach-O, PE, and so on) into the final
-executable itself. Keeping 6l simple means we cannot possibly
+executable itself. Keeping cmd/link simple means we cannot possibly
 implement the full semantics of the host linker, so the kinds of
 objects that can be linked directly into the binary is limited (other
 code can only be used as a dynamic library). On the other hand, when
-using internal linking, 6l can generate Go binaries by itself.
+using internal linking, cmd/link can generate Go binaries by itself.
 
 In order to allow linking arbitrary object files without requiring
 dynamic libraries, cgo supports an "external" linking mode too. In
-external linking mode, 6l does not process any host object files.
+external linking mode, cmd/link does not process any host object files.
 Instead, it collects all the Go code and writes a single go.o object
 file containing it. Then it invokes the host linker (usually gcc) to
 combine the go.o object file and any supporting non-Go code into a
@@ -582,8 +578,8 @@ to be made when linking the final binary.
 Linking Directives
 
 In either linking mode, package-specific directives must be passed
-through to 6l. These are communicated by writing //go: directives in a
-Go source file compiled by 6g. The directives are copied into the .6
+through to cmd/link. These are communicated by writing //go: directives in a
+Go source file compiled by gc. The directives are copied into the .o
 object file and then processed by the linker.
 
 The directives are:
@@ -672,7 +668,7 @@ Example
 As a simple example, consider a package that uses cgo to call C.sin.
 The following code will be generated by cgo:
 
-	// compiled by 6g
+	// compiled by gc
 
 	//go:cgo_ldflag "-lm"
 
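For context, a minimal sketch of the kind of input package this example assumes; the package name is an invention for illustration, and the source below is not one of the generated files listed in the comment:

package cgosin

// #cgo LDFLAGS: -lm
// #include <math.h>
import "C"

// Sin calls the C library's sin through cgo; cgo routes the C.sin reference
// through generated wrappers such as the _cgo_gcc_Cfunc_sin described in
// this comment.
func Sin(x float64) float64 {
	return float64(C.sin(C.double(x)))
}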
@@ -708,7 +704,7 @@ Otherwise the link will be an internal one.
 The linking directives are used according to the kind of final link
 used.
 
-In internal mode, 6l itself processes all the host object files, in
+In internal mode, cmd/link itself processes all the host object files, in
 particular foo.cgo2.o. To do so, it uses the cgo_import_dynamic and
 cgo_dynamic_linker directives to learn that the otherwise undefined
 reference to sin in foo.cgo2.o should be rewritten to refer to the
@@ -716,56 +712,56 @@ symbol sin with version GLIBC_2.2.5 from the dynamic library
 "libm.so.6", and the binary should request "/lib/ld-linux.so.2" as its
 runtime dynamic linker.
 
-In external mode, 6l does not process any host object files, in
-particular foo.cgo2.o. It links together the 6g-generated object
+In external mode, cmd/link does not process any host object files, in
+particular foo.cgo2.o. It links together the gc-generated object
 files, along with any other Go code, into a go.o file. While doing
-that, 6l will discover that there is no definition for
-_cgo_gcc_Cfunc_sin, referred to by the 6g-compiled source file. This
-is okay, because 6l also processes the cgo_import_static directive and
+that, cmd/link will discover that there is no definition for
+_cgo_gcc_Cfunc_sin, referred to by the gc-compiled source file. This
+is okay, because cmd/link also processes the cgo_import_static directive and
 knows that _cgo_gcc_Cfunc_sin is expected to be supplied by a host
-object file, so 6l does not treat the missing symbol as an error when
+object file, so cmd/link does not treat the missing symbol as an error when
 creating go.o. Indeed, the definition for _cgo_gcc_Cfunc_sin will be
 provided to the host linker by foo2.cgo.o, which in turn will need the
-symbol 'sin'. 6l also processes the cgo_ldflag directives, so that it
+symbol 'sin'. cmd/link also processes the cgo_ldflag directives, so that it
 knows that the eventual host link command must include the -lm
 argument, so that the host linker will be able to find 'sin' in the
 math library.
 
-6l Command Line Interface
+cmd/link Command Line Interface
 
-The go command and any other Go-aware build systems invoke 6l
-to link a collection of packages into a single binary. By default, 6l will
+The go command and any other Go-aware build systems invoke cmd/link
+to link a collection of packages into a single binary. By default, cmd/link will
 present the same interface it does today:
 
-	6l main.a
+	cmd/link main.a
 
-produces a file named 6.out, even if 6l does so by invoking the host
+produces a file named a.out, even if cmd/link does so by invoking the host
 linker in external linking mode.
 
-By default, 6l will decide the linking mode as follows: if the only
+By default, cmd/link will decide the linking mode as follows: if the only
 packages using cgo are those on a whitelist of standard library
-packages (net, os/user, runtime/cgo), 6l will use internal linking
-mode. Otherwise, there are non-standard cgo packages involved, and 6l
+packages (net, os/user, runtime/cgo), cmd/link will use internal linking
+mode. Otherwise, there are non-standard cgo packages involved, and cmd/link
 will use external linking mode. The first rule means that a build of
 the godoc binary, which uses net but no other cgo, can run without
 needing gcc available. The second rule means that a build of a
 cgo-wrapped library like sqlite3 can generate a standalone executable
 instead of needing to refer to a dynamic library. The specific choice
-can be overridden using a command line flag: 6l -linkmode=internal or
-6l -linkmode=external.
+can be overridden using a command line flag: cmd/link -linkmode=internal or
+cmd/link -linkmode=external.
 
-In an external link, 6l will create a temporary directory, write any
+In an external link, cmd/link will create a temporary directory, write any
 host object files found in package archives to that directory (renamed
 to avoid conflicts), write the go.o file to that directory, and invoke
 the host linker. The default value for the host linker is $CC, split
 into fields, or else "gcc". The specific host linker command line can
-be overridden using command line flags: 6l -extld=clang
+be overridden using command line flags: cmd/link -extld=clang
 -extldflags='-ggdb -O3'. If any package in a build includes a .cc or
 other file compiled by the C++ compiler, the go tool will use the
 -extld option to set the host linker to the C++ compiler.
 
 These defaults mean that Go-aware build systems can ignore the linking
-changes and keep running plain '6l' and get reasonable results, but
+changes and keep running plain 'cmd/link' and get reasonable results, but
 they can also control the linking details if desired.
 
 */
@ -607,6 +607,10 @@ func (p *Package) rewriteRef(f *File) {
|
|||
if r.Name.Kind != "func" {
|
||||
if r.Name.Kind == "type" {
|
||||
r.Context = "type"
|
||||
if r.Name.Type == nil {
|
||||
error_(r.Pos(), "invalid conversion to C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
|
||||
break
|
||||
}
|
||||
expr = r.Name.Type.Go
|
||||
break
|
||||
}
|
||||
|
|
@ -658,6 +662,10 @@ func (p *Package) rewriteRef(f *File) {
|
|||
}
|
||||
} else if r.Name.Kind == "type" {
|
||||
// Okay - might be new(T)
|
||||
if r.Name.Type == nil {
|
||||
error_(r.Pos(), "expression C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
|
||||
break
|
||||
}
|
||||
expr = r.Name.Type.Go
|
||||
} else if r.Name.Kind == "var" {
|
||||
expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr}
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ import (
|
|||
"go/printer"
|
||||
"go/token"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
|
|
@ -19,7 +20,7 @@ func (p *Package) godefs(f *File, srcfile string) string {
|
|||
var buf bytes.Buffer
|
||||
|
||||
fmt.Fprintf(&buf, "// Created by cgo -godefs - DO NOT EDIT\n")
|
||||
fmt.Fprintf(&buf, "// %s\n", strings.Join(os.Args, " "))
|
||||
fmt.Fprintf(&buf, "// %s %s\n", filepath.Base(os.Args[0]), strings.Join(os.Args[1:], " "))
|
||||
fmt.Fprintf(&buf, "\n")
|
||||
|
||||
override := make(map[string]string)
|
||||
|
|
|
|||
|
|
@ -279,11 +279,7 @@ func main() {
|
|||
if nerrors > 0 {
|
||||
os.Exit(2)
|
||||
}
|
||||
pkg := f.Package
|
||||
if dir := os.Getenv("CGOPKGPATH"); dir != "" {
|
||||
pkg = filepath.Join(dir, pkg)
|
||||
}
|
||||
p.PackagePath = pkg
|
||||
p.PackagePath = f.Package
|
||||
p.Record(f)
|
||||
if *godefs {
|
||||
os.Stdout.WriteString(p.godefs(f, input))
|
||||
|
|
|
|||
|
|
@ -933,23 +933,15 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) {
|
|||
fmt.Fprintf(fgcch, "\n%s", exp.Doc)
|
||||
}
|
||||
|
||||
fmt.Fprintf(fgcch, "extern %s %s %s;\n", cRet, exp.ExpName, cParams)
|
||||
|
||||
// We need to use a name that will be exported by the
|
||||
// Go code; otherwise gccgo will make it static and we
|
||||
// will not be able to link against it from the C
|
||||
// code.
|
||||
goName := "Cgoexp_" + exp.ExpName
|
||||
fmt.Fprintf(fgcch, `extern %s %s %s __asm__("%s.%s");`, cRet, goName, cParams, gccgoSymbolPrefix, goName)
|
||||
fmt.Fprint(fgcch, "\n")
|
||||
|
||||
// Use a #define so that the C code that includes
|
||||
// cgo_export.h will be able to refer to the Go
|
||||
// function using the expected name.
|
||||
fmt.Fprintf(fgcch, "#define %s %s\n", exp.ExpName, goName)
|
||||
|
||||
// Use a #undef in _cgo_export.c so that we ignore the
|
||||
// #define from cgo_export.h, since here we are
|
||||
// defining the real function.
|
||||
fmt.Fprintf(fgcc, "#undef %s\n", exp.ExpName)
|
||||
fmt.Fprintf(fgcc, `extern %s %s %s __asm__("%s.%s");`, cRet, goName, cParams, gccgoSymbolPrefix, goName)
|
||||
fmt.Fprint(fgcc, "\n")
|
||||
|
||||
fmt.Fprint(fgcc, "\n")
|
||||
fmt.Fprintf(fgcc, "%s %s %s {\n", cRet, exp.ExpName, cParams)
|
||||
|
|
|
|||
|
|
@ -80,17 +80,27 @@ func blockcopy(n, ns *gc.Node, osrc, odst, w int64) {
|
|||
gins(x86.ACLD, nil, nil)
|
||||
} else {
|
||||
// normal direction
|
||||
if q > 128 || (gc.Nacl && q >= 4) {
|
||||
if q > 128 || (gc.Nacl && q >= 4) || (obj.Getgoos() == "plan9" && q >= 4) {
|
||||
gconreg(movptr, q, x86.REG_CX)
|
||||
gins(x86.AREP, nil, nil) // repeat
|
||||
gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
|
||||
} else if q >= 4 {
|
||||
var oldx0 gc.Node
|
||||
var x0 gc.Node
|
||||
savex(x86.REG_X0, &x0, &oldx0, nil, gc.Types[gc.TFLOAT64])
|
||||
|
||||
p := gins(obj.ADUFFCOPY, nil, nil)
|
||||
p.To.Type = obj.TYPE_ADDR
|
||||
p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
|
||||
|
||||
// 14 and 128 = magic constants: see ../../runtime/asm_amd64.s
|
||||
p.To.Offset = 14 * (128 - q)
|
||||
// 64 blocks taking 14 bytes each
|
||||
// see ../../../../runtime/mkduff.go
|
||||
p.To.Offset = 14 * (64 - q/2)
|
||||
restx(&x0, &oldx0)
|
||||
|
||||
if q%2 != 0 {
|
||||
gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
|
||||
}
|
||||
} else if !gc.Nacl && c == 0 {
|
||||
// We don't need the MOVSQ side-effect of updating SI and DI,
|
||||
// and issuing a sequence of MOVQs directly is faster.
|
||||
|
|
|
|||
|
|
@ -28,6 +28,7 @@ func defframe(ptxt *obj.Prog) {
|
|||
hi := int64(0)
|
||||
lo := hi
|
||||
ax := uint32(0)
|
||||
x0 := uint32(0)
|
||||
|
||||
// iterate through declarations - they are sorted in decreasing xoffset order.
|
||||
for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
|
||||
|
|
@ -50,7 +51,7 @@ func defframe(ptxt *obj.Prog) {
|
|||
}
|
||||
|
||||
// zero old range
|
||||
p = zerorange(p, int64(frame), lo, hi, &ax)
|
||||
p = zerorange(p, int64(frame), lo, hi, &ax, &x0)
|
||||
|
||||
// set new range
|
||||
hi = n.Xoffset + n.Type.Width
|
||||
|
|
@ -59,88 +60,104 @@ func defframe(ptxt *obj.Prog) {
|
|||
}
|
||||
|
||||
// zero final range
|
||||
zerorange(p, int64(frame), lo, hi, &ax)
|
||||
zerorange(p, int64(frame), lo, hi, &ax, &x0)
|
||||
}
|
||||
|
||||
// DUFFZERO consists of repeated blocks of 4 MOVs + ADD,
|
||||
// with 4 STOSQs at the very end.
|
||||
// The trailing STOSQs prevent the need for a DI preadjustment
|
||||
// for small numbers of words to clear.
|
||||
// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD,
|
||||
// See runtime/mkduff.go.
|
||||
const (
|
||||
dzBlocks = 31 // number of MOV/ADD blocks
|
||||
dzBlocks = 16 // number of MOV/ADD blocks
|
||||
dzBlockLen = 4 // number of clears per block
|
||||
dzBlockSize = 19 // size of instructions in a single block
|
||||
dzMovSize = 4 // size of single MOV instruction w/ offset
|
||||
dzAddSize = 4 // size of single ADD instruction
|
||||
dzDIStep = 8 // number of bytes cleared by each MOV instruction
|
||||
dzClearStep = 16 // number of bytes cleared by each MOV instruction
|
||||
|
||||
dzTailLen = 4 // number of final STOSQ instructions
|
||||
dzTailSize = 2 // size of single STOSQ instruction
|
||||
|
||||
dzSize = dzBlocks*dzBlockSize + dzTailLen*dzTailSize // total size of DUFFZERO routine
|
||||
dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
|
||||
dzSize = dzBlocks * dzBlockSize
|
||||
)
|
||||
|
||||
// duffzeroDI returns the pre-adjustment to DI for a call to DUFFZERO.
|
||||
// q is the number of words to zero.
|
||||
func dzDI(q int64) int64 {
|
||||
if q < dzTailLen {
|
||||
return 0
|
||||
}
|
||||
q -= dzTailLen
|
||||
if q%dzBlockLen == 0 {
|
||||
return 0
|
||||
}
|
||||
return -dzDIStep * (dzBlockLen - q%dzBlockLen)
|
||||
}
|
||||
|
||||
// dzOff returns the offset for a jump into DUFFZERO.
|
||||
// q is the number of words to zero.
|
||||
func dzOff(q int64) int64 {
|
||||
// b is the number of bytes to zero.
|
||||
func dzOff(b int64) int64 {
|
||||
off := int64(dzSize)
|
||||
if q < dzTailLen {
|
||||
return off - q*dzTailSize
|
||||
}
|
||||
off -= dzTailLen * dzTailSize
|
||||
q -= dzTailLen
|
||||
blocks, steps := q/dzBlockLen, q%dzBlockLen
|
||||
off -= dzBlockSize * blocks
|
||||
if steps > 0 {
|
||||
off -= dzAddSize + dzMovSize*steps
|
||||
off -= b / dzClearLen * dzBlockSize
|
||||
tailLen := b % dzClearLen
|
||||
if tailLen >= dzClearStep {
|
||||
off -= dzAddSize + dzMovSize*(tailLen/dzClearStep)
|
||||
}
|
||||
return off
|
||||
}
|
||||
|
||||
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
|
||||
// duffzeroDI returns the pre-adjustment to DI for a call to DUFFZERO.
|
||||
// b is the number of bytes to zero.
|
||||
func dzDI(b int64) int64 {
|
||||
tailLen := b % dzClearLen
|
||||
if tailLen < dzClearStep {
|
||||
return 0
|
||||
}
|
||||
tailSteps := tailLen / dzClearStep
|
||||
return -dzClearStep * (dzBlockLen - tailSteps)
|
||||
}
|
||||
|
||||
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32, x0 *uint32) *obj.Prog {
|
||||
cnt := hi - lo
|
||||
if cnt == 0 {
|
||||
return p
|
||||
}
|
||||
if *ax == 0 {
|
||||
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
|
||||
*ax = 1
|
||||
}
|
||||
|
||||
if cnt%int64(gc.Widthreg) != 0 {
|
||||
// should only happen with nacl
|
||||
if cnt%int64(gc.Widthptr) != 0 {
|
||||
gc.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
|
||||
}
|
||||
if *ax == 0 {
|
||||
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
|
||||
*ax = 1
|
||||
}
|
||||
p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
|
||||
lo += int64(gc.Widthptr)
|
||||
cnt -= int64(gc.Widthptr)
|
||||
}
|
||||
|
||||
if cnt <= int64(4*gc.Widthreg) {
|
||||
for i := int64(0); i < cnt; i += int64(gc.Widthreg) {
|
||||
p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
|
||||
if cnt == 8 {
|
||||
if *ax == 0 {
|
||||
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
|
||||
*ax = 1
|
||||
}
|
||||
p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
|
||||
} else if cnt <= int64(8*gc.Widthreg) {
|
||||
if *x0 == 0 {
|
||||
p = appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
|
||||
*x0 = 1
|
||||
}
|
||||
|
||||
for i := int64(0); i < cnt/16; i++ {
|
||||
p = appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i*16)
|
||||
}
|
||||
|
||||
if cnt%16 != 0 {
|
||||
p = appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+cnt-int64(16))
|
||||
}
|
||||
} else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
|
||||
q := cnt / int64(gc.Widthreg)
|
||||
p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(q), obj.TYPE_REG, x86.REG_DI, 0)
|
||||
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(q))
|
||||
if *x0 == 0 {
|
||||
p = appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
|
||||
*x0 = 1
|
||||
}
|
||||
|
||||
p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
|
||||
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
|
||||
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
|
||||
|
||||
if cnt%16 != 0 {
|
||||
p = appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
|
||||
}
|
||||
} else {
|
||||
if *ax == 0 {
|
||||
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
|
||||
*ax = 1
|
||||
}
|
||||
|
||||
p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
|
||||
p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
|
||||
p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
|
||||
|
|
@ -537,54 +554,14 @@ func clearfat(nl *gc.Node) {
|
|||
gc.Dump("\nclearfat", nl)
|
||||
}
|
||||
|
||||
w := nl.Type.Width
|
||||
|
||||
// Avoid taking the address for simple enough types.
|
||||
if gc.Componentgen(nil, nl) {
|
||||
return
|
||||
}
|
||||
|
||||
c := w % 8 // bytes
|
||||
q := w / 8 // quads
|
||||
|
||||
if q < 4 {
|
||||
// Write sequence of MOV 0, off(base) instead of using STOSQ.
|
||||
// The hope is that although the code will be slightly longer,
|
||||
// the MOVs will have no dependencies and pipeline better
|
||||
// than the unrolled STOSQ loop.
|
||||
// NOTE: Must use agen, not igen, so that optimizer sees address
|
||||
// being taken. We are not writing on field boundaries.
|
||||
var n1 gc.Node
|
||||
gc.Agenr(nl, &n1, nil)
|
||||
|
||||
n1.Op = gc.OINDREG
|
||||
var z gc.Node
|
||||
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
|
||||
for ; q > 0; q-- {
|
||||
n1.Type = z.Type
|
||||
gins(x86.AMOVQ, &z, &n1)
|
||||
n1.Xoffset += 8
|
||||
}
|
||||
|
||||
if c >= 4 {
|
||||
gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
|
||||
n1.Type = z.Type
|
||||
gins(x86.AMOVL, &z, &n1)
|
||||
n1.Xoffset += 4
|
||||
c -= 4
|
||||
}
|
||||
|
||||
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
|
||||
for ; c > 0; c-- {
|
||||
n1.Type = z.Type
|
||||
gins(x86.AMOVB, &z, &n1)
|
||||
n1.Xoffset++
|
||||
}
|
||||
|
||||
gc.Regfree(&n1)
|
||||
return
|
||||
}
|
||||
w := nl.Type.Width
|
||||
|
||||
if w > 1024 || (gc.Nacl && w >= 64) {
|
||||
var oldn1 gc.Node
|
||||
var n1 gc.Node
|
||||
savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
|
||||
|
|
@ -594,49 +571,133 @@ func clearfat(nl *gc.Node) {
|
|||
var oldax gc.Node
|
||||
savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
|
||||
gconreg(x86.AMOVL, 0, x86.REG_AX)
|
||||
gconreg(movptr, w/8, x86.REG_CX)
|
||||
|
||||
if q > 128 || gc.Nacl {
|
||||
gconreg(movptr, q, x86.REG_CX)
|
||||
gins(x86.AREP, nil, nil) // repeat
|
||||
gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
|
||||
} else {
|
||||
if di := dzDI(q); di != 0 {
|
||||
|
||||
if w%8 != 0 {
|
||||
n1.Op = gc.OINDREG
|
||||
clearfat_tail(&n1, w%8)
|
||||
}
|
||||
|
||||
restx(&n1, &oldn1)
|
||||
restx(&ax, &oldax)
|
||||
return
|
||||
}
|
||||
|
||||
if w >= 64 {
|
||||
var oldn1 gc.Node
|
||||
var n1 gc.Node
|
||||
savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
|
||||
gc.Agen(nl, &n1)
|
||||
|
||||
var vec_zero gc.Node
|
||||
var old_x0 gc.Node
|
||||
savex(x86.REG_X0, &vec_zero, &old_x0, nil, gc.Types[gc.TFLOAT64])
|
||||
gins(x86.AXORPS, &vec_zero, &vec_zero)
|
||||
|
||||
if di := dzDI(w); di != 0 {
|
||||
gconreg(addptr, di, x86.REG_DI)
|
||||
}
|
||||
p := gins(obj.ADUFFZERO, nil, nil)
|
||||
p.To.Type = obj.TYPE_ADDR
|
||||
p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
|
||||
p.To.Offset = dzOff(q)
|
||||
}
|
||||
|
||||
z := ax
|
||||
di := n1
|
||||
if w >= 8 && c >= 4 {
|
||||
di.Op = gc.OINDREG
|
||||
z.Type = gc.Types[gc.TINT64]
|
||||
di.Type = z.Type
|
||||
p := gins(x86.AMOVQ, &z, &di)
|
||||
p.To.Scale = 1
|
||||
p.To.Offset = c - 8
|
||||
} else if c >= 4 {
|
||||
di.Op = gc.OINDREG
|
||||
z.Type = gc.Types[gc.TINT32]
|
||||
di.Type = z.Type
|
||||
gins(x86.AMOVL, &z, &di)
|
||||
if c > 4 {
|
||||
p := gins(x86.AMOVL, &z, &di)
|
||||
p.To.Scale = 1
|
||||
p.To.Offset = c - 4
|
||||
}
|
||||
} else {
|
||||
for c > 0 {
|
||||
gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
|
||||
c--
|
||||
}
|
||||
p.To.Offset = dzOff(w)
|
||||
|
||||
if w%16 != 0 {
|
||||
n1.Op = gc.OINDREG
|
||||
n1.Xoffset -= 16 - w%16
|
||||
gins(x86.AMOVUPS, &vec_zero, &n1)
|
||||
}
|
||||
|
||||
restx(&vec_zero, &old_x0)
|
||||
restx(&n1, &oldn1)
|
||||
restx(&ax, &oldax)
|
||||
return
|
||||
}
|
||||
|
||||
// NOTE: Must use agen, not igen, so that optimizer sees address
|
||||
// being taken. We are not writing on field boundaries.
|
||||
var n1 gc.Node
|
||||
gc.Agenr(nl, &n1, nil)
|
||||
n1.Op = gc.OINDREG
|
||||
|
||||
clearfat_tail(&n1, w)
|
||||
|
||||
gc.Regfree(&n1)
|
||||
}
|
||||
|
||||
func clearfat_tail(n1 *gc.Node, b int64) {
|
||||
if b >= 16 {
|
||||
var vec_zero gc.Node
|
||||
gc.Regalloc(&vec_zero, gc.Types[gc.TFLOAT64], nil)
|
||||
gins(x86.AXORPS, &vec_zero, &vec_zero)
|
||||
|
||||
for b >= 16 {
|
||||
gins(x86.AMOVUPS, &vec_zero, n1)
|
||||
n1.Xoffset += 16
|
||||
b -= 16
|
||||
}
|
||||
|
||||
// MOVUPS X0, off(base) is a few bytes shorter than MOV 0, off(base)
|
||||
if b != 0 {
|
||||
n1.Xoffset -= 16 - b
|
||||
gins(x86.AMOVUPS, &vec_zero, n1)
|
||||
}
|
||||
|
||||
gc.Regfree(&vec_zero)
|
||||
return
|
||||
}
|
||||
|
||||
// Write sequence of MOV 0, off(base) instead of using STOSQ.
|
||||
// The hope is that although the code will be slightly longer,
|
||||
// the MOVs will have no dependencies and pipeline better
|
||||
// than the unrolled STOSQ loop.
|
||||
var z gc.Node
|
||||
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
|
||||
if b >= 8 {
|
||||
n1.Type = z.Type
|
||||
gins(x86.AMOVQ, &z, n1)
|
||||
n1.Xoffset += 8
|
||||
b -= 8
|
||||
|
||||
if b != 0 {
|
||||
n1.Xoffset -= 8 - b
|
||||
gins(x86.AMOVQ, &z, n1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if b >= 4 {
|
||||
gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
|
||||
n1.Type = z.Type
|
||||
gins(x86.AMOVL, &z, n1)
|
||||
n1.Xoffset += 4
|
||||
b -= 4
|
||||
|
||||
if b != 0 {
|
||||
n1.Xoffset -= 4 - b
|
||||
gins(x86.AMOVL, &z, n1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if b >= 2 {
|
||||
gc.Nodconst(&z, gc.Types[gc.TUINT16], 0)
|
||||
n1.Type = z.Type
|
||||
gins(x86.AMOVW, &z, n1)
|
||||
n1.Xoffset += 2
|
||||
b -= 2
|
||||
}
|
||||
|
||||
gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
|
||||
for b > 0 {
|
||||
n1.Type = z.Type
|
||||
gins(x86.AMOVB, &z, n1)
|
||||
n1.Xoffset++
|
||||
b--
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Called after regopt and peep have run.
|
||||
|
|
|
|||
|
|
@ -136,6 +136,7 @@ var progtable = [x86.ALAST]obj.ProgInfo{
|
|||
x86.AMOVL: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move},
|
||||
x86.AMOVQ: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
|
||||
x86.AMOVW: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move},
|
||||
x86.AMOVUPS: {Flags: gc.LeftRead | gc.RightWrite | gc.Move},
|
||||
x86.AMOVSB: {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI},
|
||||
x86.AMOVSL: {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI},
|
||||
x86.AMOVSQ: {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI},
|
||||
|
|
@ -248,6 +249,7 @@ var progtable = [x86.ALAST]obj.ProgInfo{
|
|||
x86.AXORL: {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
|
||||
x86.AXORQ: {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry},
|
||||
x86.AXORW: {Flags: gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry},
|
||||
x86.AXORPS: {Flags: gc.LeftRead | RightRdwr},
|
||||
}
|
||||
|
||||
func progflags(p *obj.Prog) uint32 {
|
||||
|
|
|
|||
|
|
@ -467,30 +467,18 @@ hard:
|
|||
return
|
||||
}
|
||||
|
||||
func intLiteral(n *gc.Node) (x int64, ok bool) {
|
||||
switch {
|
||||
case n == nil:
|
||||
return
|
||||
case gc.Isconst(n, gc.CTINT):
|
||||
return n.Int(), true
|
||||
case gc.Isconst(n, gc.CTBOOL):
|
||||
return int64(obj.Bool2int(n.Bool())), true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// gins is called by the front end.
|
||||
// It synthesizes some multiple-instruction sequences
|
||||
// so the front end can stay simpler.
|
||||
func gins(as int, f, t *gc.Node) *obj.Prog {
|
||||
if as >= obj.A_ARCHSPECIFIC {
|
||||
if x, ok := intLiteral(f); ok {
|
||||
if x, ok := f.IntLiteral(); ok {
|
||||
ginscon(as, x, t)
|
||||
return nil // caller must not use
|
||||
}
|
||||
}
|
||||
if as == arm64.ACMP {
|
||||
if x, ok := intLiteral(t); ok {
|
||||
if x, ok := t.IntLiteral(); ok {
|
||||
ginscon2(as, f, x)
|
||||
return nil // caller must not use
|
||||
}
|
||||
@@ -1272,7 +1272,7 @@ func (z *Float) usub(x, y *Float) {
 		ex = ey
 	}
 
-	// operands may have cancelled each other out
+	// operands may have canceled each other out
 	if len(z.mant) == 0 {
 		z.acc = Exact
 		z.form = zero
@@ -698,7 +698,9 @@ func TestGcd(t *testing.T) {
 		testGcd(t, d, x, y, a, b)
 	}
 
-	quick.Check(checkGcd, nil)
+	if err := quick.Check(checkGcd, nil); err != nil {
+		t.Error(err)
+	}
 }
 
 var primes = []string{
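The hunk above stops discarding the error returned by quick.Check. A small, hedged sketch of that usage pattern outside the math/big tests, with an invented property for illustration:

package main

import (
	"fmt"
	"testing/quick"
)

func main() {
	// Property: addition of int64 values is commutative.
	add := func(a, b int64) bool { return a+b == b+a }
	if err := quick.Check(add, nil); err != nil {
		fmt.Println("property violated:", err)
	} else {
		fmt.Println("property held for all generated inputs")
	}
}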
@ -810,10 +810,7 @@ func cgen_wbptr(n, res *Node) {
|
|||
a := &p.To
|
||||
a.Type = obj.TYPE_MEM
|
||||
a.Reg = int16(Thearch.REGSP)
|
||||
a.Offset = 0
|
||||
if HasLinkRegister() {
|
||||
a.Offset += int64(Widthptr)
|
||||
}
|
||||
a.Offset = Ctxt.FixedFrameSize()
|
||||
p2 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, nil)
|
||||
p2.To = p.To
|
||||
p2.To.Offset += int64(Widthptr)
|
||||
|
|
@ -849,10 +846,7 @@ func cgen_wbfat(n, res *Node) {
|
|||
a := &p.To
|
||||
a.Type = obj.TYPE_MEM
|
||||
a.Reg = int16(Thearch.REGSP)
|
||||
a.Offset = 0
|
||||
if HasLinkRegister() {
|
||||
a.Offset += int64(Widthptr)
|
||||
}
|
||||
a.Offset = Ctxt.FixedFrameSize()
|
||||
if needType {
|
||||
a.Offset += int64(Widthptr)
|
||||
}
|
||||
|
|
@ -1686,10 +1680,7 @@ func Igen(n *Node, a *Node, res *Node) {
|
|||
a.Op = OINDREG
|
||||
a.Reg = int16(Thearch.REGSP)
|
||||
a.Addable = true
|
||||
a.Xoffset = fp.Width
|
||||
if HasLinkRegister() {
|
||||
a.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
||||
}
|
||||
a.Xoffset = fp.Width + Ctxt.FixedFrameSize()
|
||||
a.Type = n.Type
|
||||
return
|
||||
|
||||
|
|
@ -2219,11 +2210,7 @@ func stkof(n *Node) int64 {
|
|||
var flist Iter
|
||||
t = Structfirst(&flist, Getoutarg(t))
|
||||
if t != nil {
|
||||
w := t.Width
|
||||
if HasLinkRegister() {
|
||||
w += int64(Ctxt.Arch.Ptrsize)
|
||||
}
|
||||
return w
|
||||
return t.Width + Ctxt.FixedFrameSize()
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2379,17 +2366,11 @@ func Ginscall(f *Node, proc int) {
|
|||
// size of arguments at 0(SP)
|
||||
stk.Op = OINDREG
|
||||
stk.Reg = int16(Thearch.REGSP)
|
||||
stk.Xoffset = 0
|
||||
if HasLinkRegister() {
|
||||
stk.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
||||
}
|
||||
stk.Xoffset = Ctxt.FixedFrameSize()
|
||||
Thearch.Ginscon(Thearch.Optoas(OAS, Types[TINT32]), int64(Argsize(f.Type)), &stk)
|
||||
|
||||
// FuncVal* at 8(SP)
|
||||
stk.Xoffset = int64(Widthptr)
|
||||
if HasLinkRegister() {
|
||||
stk.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
||||
}
|
||||
stk.Xoffset = int64(Widthptr) + Ctxt.FixedFrameSize()
|
||||
|
||||
var reg Node
|
||||
Nodreg(®, Types[Tptr], Thearch.REGCALLX2)
|
||||
|
|
@ -2447,10 +2428,7 @@ func cgen_callinter(n *Node, res *Node, proc int) {
|
|||
|
||||
var nodsp Node
|
||||
Nodindreg(&nodsp, Types[Tptr], Thearch.REGSP)
|
||||
nodsp.Xoffset = 0
|
||||
if HasLinkRegister() {
|
||||
nodsp.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
||||
}
|
||||
nodsp.Xoffset = Ctxt.FixedFrameSize()
|
||||
if proc != 0 {
|
||||
nodsp.Xoffset += 2 * int64(Widthptr) // leave room for size & fn
|
||||
}
|
||||
|
|
@ -2541,11 +2519,6 @@ func cgen_call(n *Node, proc int) {
|
|||
Ginscall(n.Left, proc)
|
||||
}
|
||||
|
||||
func HasLinkRegister() bool {
|
||||
c := Ctxt.Arch.Thechar
|
||||
return c != '6' && c != '8'
|
||||
}
|
||||
|
||||
/*
|
||||
* call to n has already been generated.
|
||||
* generate:
|
||||
|
|
@ -2568,10 +2541,7 @@ func cgen_callret(n *Node, res *Node) {
|
|||
nod.Reg = int16(Thearch.REGSP)
|
||||
nod.Addable = true
|
||||
|
||||
nod.Xoffset = fp.Width
|
||||
if HasLinkRegister() {
|
||||
nod.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
||||
}
|
||||
nod.Xoffset = fp.Width + Ctxt.FixedFrameSize()
|
||||
nod.Type = fp.Type
|
||||
Cgen_as(res, &nod)
|
||||
}
|
||||
|
|
@ -2597,10 +2567,7 @@ func cgen_aret(n *Node, res *Node) {
|
|||
nod1.Op = OINDREG
|
||||
nod1.Reg = int16(Thearch.REGSP)
|
||||
nod1.Addable = true
|
||||
nod1.Xoffset = fp.Width
|
||||
if HasLinkRegister() {
|
||||
nod1.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
||||
}
|
||||
nod1.Xoffset = fp.Width + Ctxt.FixedFrameSize()
|
||||
nod1.Type = fp.Type
|
||||
|
||||
if res.Op != OREGISTER {
|
||||
|
|
@ -2858,10 +2825,7 @@ func cgen_append(n, res *Node) {
|
|||
arg.Op = OINDREG
|
||||
arg.Reg = int16(Thearch.REGSP)
|
||||
arg.Addable = true
|
||||
arg.Xoffset = 0
|
||||
if HasLinkRegister() {
|
||||
arg.Xoffset = int64(Ctxt.Arch.Ptrsize)
|
||||
}
|
||||
arg.Xoffset = Ctxt.FixedFrameSize()
|
||||
arg.Type = Ptrto(Types[TUINT8])
|
||||
Cgen(typename(res.Type), &arg)
|
||||
arg.Xoffset += int64(Widthptr)
|
||||
|
|
|
|||
|
|
@ -10,6 +10,19 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
// IntLiteral returns the Node's literal value as an interger.
|
||||
func (n *Node) IntLiteral() (x int64, ok bool) {
|
||||
switch {
|
||||
case n == nil:
|
||||
return
|
||||
case Isconst(n, CTINT):
|
||||
return n.Int(), true
|
||||
case Isconst(n, CTBOOL):
|
||||
return int64(obj.Bool2int(n.Bool())), true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Int returns n as an int.
|
||||
// n must be an integer constant.
|
||||
func (n *Node) Int() int64 {
|
||||
|
|
@ -434,19 +447,8 @@ func overflow(v Val, t *Type) {
|
|||
return
|
||||
}
|
||||
|
||||
if !doesoverflow(v, t) {
|
||||
return
|
||||
}
|
||||
|
||||
switch v.Ctype() {
|
||||
case CTINT, CTRUNE:
|
||||
Yyerror("constant %v overflows %v", v.U.(*Mpint), t)
|
||||
|
||||
case CTFLT:
|
||||
Yyerror("constant %v overflows %v", Fconv(v.U.(*Mpflt), obj.FmtSharp), t)
|
||||
|
||||
case CTCPLX:
|
||||
Yyerror("constant %v overflows %v", Fconv(v.U.(*Mpflt), obj.FmtSharp), t)
|
||||
if doesoverflow(v, t) {
|
||||
Yyerror("constant %s overflows %v", Vconv(v, 0), t)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -997,37 +999,37 @@ func evconst(n *Node) {
|
|||
goto setfalse
|
||||
|
||||
case OEQ<<16 | CTSTR:
|
||||
if cmpslit(nl, nr) == 0 {
|
||||
if strlit(nl) == strlit(nr) {
|
||||
goto settrue
|
||||
}
|
||||
goto setfalse
|
||||
|
||||
case ONE<<16 | CTSTR:
|
||||
if cmpslit(nl, nr) != 0 {
|
||||
if strlit(nl) != strlit(nr) {
|
||||
goto settrue
|
||||
}
|
||||
goto setfalse
|
||||
|
||||
case OLT<<16 | CTSTR:
|
||||
if cmpslit(nl, nr) < 0 {
|
||||
if strlit(nl) < strlit(nr) {
|
||||
goto settrue
|
||||
}
|
||||
goto setfalse
|
||||
|
||||
case OLE<<16 | CTSTR:
|
||||
if cmpslit(nl, nr) <= 0 {
|
||||
if strlit(nl) <= strlit(nr) {
|
||||
goto settrue
|
||||
}
|
||||
goto setfalse
|
||||
|
||||
case OGE<<16 | CTSTR:
|
||||
if cmpslit(nl, nr) >= 0 {
|
||||
if strlit(nl) >= strlit(nr) {
|
||||
goto settrue
|
||||
}
|
||||
goto setfalse
|
||||
|
||||
case OGT<<16 | CTSTR:
|
||||
if cmpslit(nl, nr) > 0 {
|
||||
if strlit(nl) > strlit(nr) {
|
||||
goto settrue
|
||||
}
|
||||
goto setfalse
|
||||
|
|
@ -1352,8 +1354,9 @@ func defaultlit2(lp **Node, rp **Node, force int) {
|
|||
Convlit(rp, Types[TINT])
|
||||
}
|
||||
|
||||
func cmpslit(l, r *Node) int {
|
||||
return stringsCompare(l.Val().U.(string), r.Val().U.(string))
|
||||
// strlit returns the value of a literal string Node as a string.
|
||||
func strlit(n *Node) string {
|
||||
return n.Val().U.(string)
|
||||
}
|
||||
|
||||
func Smallintconst(n *Node) bool {
|
||||
|
|
|
|||
|
|
@ -79,16 +79,6 @@ func popdcl() {
|
|||
block = d.Block
|
||||
}
|
||||
|
||||
func poptodcl() {
|
||||
// pop the old marker and push a new one
|
||||
// (cannot reuse the existing one)
|
||||
// because we use the markers to identify blocks
|
||||
// for the goto restriction checks.
|
||||
popdcl()
|
||||
|
||||
markdcl()
|
||||
}
|
||||
|
||||
func markdcl() {
|
||||
d := push()
|
||||
d.Name = "" // used as a mark in fifo
|
||||
|
|
@ -192,7 +182,7 @@ func declare(n *Node, ctxt uint8) {
|
|||
|
||||
gen := 0
|
||||
if ctxt == PEXTERN {
|
||||
externdcl = list(externdcl, n)
|
||||
externdcl = append(externdcl, n)
|
||||
if dflag() {
|
||||
fmt.Printf("\t%v global decl %v %p\n", Ctxt.Line(int(lineno)), s, n)
|
||||
}
|
||||
|
|
@ -1509,5 +1499,5 @@ func makefuncsym(s *Sym) {
|
|||
s1 := funcsym(s)
|
||||
s1.Def = newfuncname(s1)
|
||||
s1.Def.Func.Shortname = newname(s)
|
||||
funcsyms = list(funcsyms, s1.Def)
|
||||
funcsyms = append(funcsyms, s1.Def)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -248,17 +248,6 @@ func satInc8(x int8) int8 {
|
|||
return x + 1
|
||||
}
|
||||
|
||||
func satAdd8(x, y int8) int8 {
|
||||
z := x + y
|
||||
if x^y < 0 || x^z >= 0 {
|
||||
return z
|
||||
}
|
||||
if x < 0 {
|
||||
return -128
|
||||
}
|
||||
return 127
|
||||
}
|
||||
|
||||
func min8(a, b int8) int8 {
|
||||
if a < b {
|
||||
return a
|
||||
|
|
@ -387,7 +376,6 @@ func escMax(e, etype uint16) uint16 {
|
|||
const (
|
||||
bitsPerOutputInTag = 3 // For each output, the number of bits for a tag
|
||||
bitsMaskForTag = uint16(1<<bitsPerOutputInTag) - 1 // The bit mask to extract a single tag.
|
||||
outputsPerTag = (16 - EscReturnBits) / bitsPerOutputInTag // The number of outputs that can be tagged.
|
||||
maxEncodedLevel = int(bitsMaskForTag - 1) // The largest level that can be stored in a tag.
|
||||
)
|
||||
|
||||
|
|
@ -662,10 +650,15 @@ func esc(e *EscState, n *Node, up *Node) {
|
|||
|
||||
n.Left.Sym.Label = nil
|
||||
|
||||
// Everything but fixed array is a dereference.
|
||||
case ORANGE:
|
||||
if n.List != nil && n.List.Next != nil {
|
||||
if Isfixedarray(n.Type) {
|
||||
// Everything but fixed array is a dereference.
|
||||
|
||||
// If fixed array is really the address of fixed array,
|
||||
// it is also a dereference, because it is implicitly
|
||||
// dereferenced (see #12588)
|
||||
if Isfixedarray(n.Type) &&
|
||||
!(Isptr[n.Right.Type.Etype] && Eqtype(n.Right.Type.Type, n.Type)) {
|
||||
escassign(e, n.List.Next.N, n.Right)
|
||||
} else {
|
||||
escassignDereference(e, n.List.Next.N, n.Right)
|
||||
|
|
@ -958,6 +951,7 @@ func escassign(e *EscState, dst *Node, src *Node) {
|
|||
OMAPLIT,
|
||||
OSTRUCTLIT,
|
||||
OPTRLIT,
|
||||
ODDDARG,
|
||||
OCALLPART:
|
||||
break
|
||||
|
||||
|
|
@ -1463,8 +1457,9 @@ func esccall(e *EscState, n *Node, up *Node) {
|
|||
}
|
||||
}
|
||||
|
||||
var src *Node
|
||||
for t := getinargx(fntype).Type; ll != nil; ll = ll.Next {
|
||||
src := ll.N
|
||||
src = ll.N
|
||||
if t.Isddd && !n.Isddd {
|
||||
// Introduce ODDDARG node to represent ... allocation.
|
||||
src = Nod(ODDDARG, nil, nil)
|
||||
|
|
@ -1505,17 +1500,17 @@ func esccall(e *EscState, n *Node, up *Node) {
|
|||
}
|
||||
|
||||
if src != ll.N {
|
||||
// This occurs when function parameter type Isddd and n not Isddd
|
||||
break
|
||||
}
|
||||
t = t.Down
|
||||
}
|
||||
|
||||
// "..." arguments are untracked
|
||||
for ; ll != nil; ll = ll.Next {
|
||||
escassign(e, &e.theSink, ll.N)
|
||||
if Debug['m'] > 2 {
|
||||
fmt.Printf("%v::esccall:: ... <- %v, untracked\n", Ctxt.Line(int(lineno)), Nconv(ll.N, obj.FmtShort))
|
||||
fmt.Printf("%v::esccall:: ... <- %v\n", Ctxt.Line(int(lineno)), Nconv(ll.N, obj.FmtShort))
|
||||
}
|
||||
escassign(e, src, ll.N) // args to slice
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1701,6 +1696,16 @@ func escwalk(e *EscState, level Level, dst *Node, src *Node) {
|
|||
case OAPPEND:
|
||||
escwalk(e, level, dst, src.List.N)
|
||||
|
||||
case ODDDARG:
|
||||
if leaks {
|
||||
src.Esc = EscHeap
|
||||
if Debug['m'] != 0 {
|
||||
Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
|
||||
}
|
||||
}
|
||||
// similar to a slice arraylit and its args.
|
||||
level = level.dec()
|
||||
|
||||
case OARRAYLIT:
|
||||
if Isfixedarray(src.Type) {
|
||||
break
|
||||
|
|
@ -1711,8 +1716,7 @@ func escwalk(e *EscState, level Level, dst *Node, src *Node) {
|
|||
|
||||
fallthrough
|
||||
|
||||
case ODDDARG,
|
||||
OMAKECHAN,
|
||||
case OMAKECHAN,
|
||||
OMAKEMAP,
|
||||
OMAKESLICE,
|
||||
OARRAYRUNESTR,
|
||||
|
|
|
|||
|
|
@ -253,21 +253,12 @@ func dumpexportvar(s *Sym) {
|
|||
}
|
||||
}
|
||||
|
||||
// methodbyname sorts types by symbol name.
|
||||
type methodbyname []*Type
|
||||
|
||||
func (x methodbyname) Len() int {
|
||||
return len(x)
|
||||
}
|
||||
|
||||
func (x methodbyname) Swap(i, j int) {
|
||||
x[i], x[j] = x[j], x[i]
|
||||
}
|
||||
|
||||
func (x methodbyname) Less(i, j int) bool {
|
||||
a := x[i]
|
||||
b := x[j]
|
||||
return stringsCompare(a.Sym.Name, b.Sym.Name) < 0
|
||||
}
|
||||
func (x methodbyname) Len() int { return len(x) }
|
||||
func (x methodbyname) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x methodbyname) Less(i, j int) bool { return x[i].Sym.Name < x[j].Sym.Name }
|
||||
|
||||
func dumpexporttype(t *Type) {
|
||||
if t == nil {
|
||||
|
|
@ -289,24 +280,15 @@ func dumpexporttype(t *Type) {
|
|||
return
|
||||
}
|
||||
|
||||
n := 0
|
||||
var m []*Type
|
||||
for f := t.Method; f != nil; f = f.Down {
|
||||
dumpexporttype(f)
|
||||
n++
|
||||
m = append(m, f)
|
||||
}
|
||||
|
||||
m := make([]*Type, n)
|
||||
i := 0
|
||||
for f := t.Method; f != nil; f = f.Down {
|
||||
m[i] = f
|
||||
i++
|
||||
}
|
||||
sort.Sort(methodbyname(m[:n]))
|
||||
sort.Sort(methodbyname(m))
|
||||
|
||||
fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
|
||||
var f *Type
|
||||
for i := 0; i < n; i++ {
|
||||
f = m[i]
|
||||
for _, f := range m {
|
||||
if f.Nointerface {
|
||||
fmt.Fprintf(bout, "\t//go:nointerface\n")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1596,8 +1596,6 @@ func Sconv(s *Sym, flag int) string {
|
|||
|
||||
sf := flag
|
||||
sm := setfmode(&flag)
|
||||
var r int
|
||||
_ = r
|
||||
str := symfmt(s, flag)
|
||||
flag = sf
|
||||
fmtmode = sm
|
||||
|
|
@ -1632,8 +1630,6 @@ func Tconv(t *Type, flag int) string {
|
|||
flag |= obj.FmtUnsigned
|
||||
}
|
||||
|
||||
var r int
|
||||
_ = r
|
||||
str := typefmt(t, flag)
|
||||
|
||||
if fmtmode == FTypeId && (sf&obj.FmtUnsigned != 0) {
|
||||
|
|
@ -1660,8 +1656,6 @@ func Nconv(n *Node, flag int) string {
|
|||
sf := flag
|
||||
sm := setfmode(&flag)
|
||||
|
||||
var r int
|
||||
_ = r
|
||||
var str string
|
||||
switch fmtmode {
|
||||
case FErr, FExp:
|
||||
|
|
@ -1694,8 +1688,6 @@ func Hconv(l *NodeList, flag int) string {
|
|||
|
||||
sf := flag
|
||||
sm := setfmode(&flag)
|
||||
var r int
|
||||
_ = r
|
||||
sep := "; "
|
||||
if fmtmode == FDbg {
|
||||
sep = "\n"
|
||||
|
|
|
|||
|
|
@ -210,7 +210,7 @@ type Type struct {
|
|||
Embedlineno int32 // first use of TFORW as embedded type
|
||||
|
||||
// for TFORW, where to copy the eventual value to
|
||||
Copyto *NodeList
|
||||
Copyto []*Node
|
||||
|
||||
Lastfn *Node // for usefield
|
||||
}
|
||||
|
|
@ -376,18 +376,17 @@ type Sig struct {
|
|||
type_ *Type
|
||||
mtype *Type
|
||||
offset int32
|
||||
link *Sig
|
||||
}
|
||||
|
||||
type Io struct {
|
||||
infile string
|
||||
bin *obj.Biobuf
|
||||
nlsemi int
|
||||
eofnl int
|
||||
cp string // used for content when bin==nil
|
||||
last int
|
||||
peekc int
|
||||
peekc1 int // second peekc for ...
|
||||
cp string // used for content when bin==nil
|
||||
nlsemi bool
|
||||
eofnl bool
|
||||
importsafe bool
|
||||
}
|
||||
|
||||
|
|
@ -584,13 +583,13 @@ var maxfltval [NTYPE]*Mpflt
|
|||
|
||||
var xtop *NodeList
|
||||
|
||||
var externdcl *NodeList
|
||||
var externdcl []*Node
|
||||
|
||||
var exportlist []*Node
|
||||
|
||||
var importlist []*Node // imported functions and methods with inlinable bodies
|
||||
|
||||
var funcsyms *NodeList
|
||||
var funcsyms []*Node
|
||||
|
||||
var dclcontext uint8 // PEXTERN/PAUTO
|
||||
|
||||
|
|
@ -598,7 +597,7 @@ var incannedimport int
|
|||
|
||||
var statuniqgen int // name generator for static temps
|
||||
|
||||
var loophack int
|
||||
var loophack bool
|
||||
|
||||
var iota_ int32
|
||||
|
||||
|
|
@ -630,12 +629,6 @@ var typesw *Node
|
|||
|
||||
var nblank *Node
|
||||
|
||||
var hunk string
|
||||
|
||||
var nhunk int32
|
||||
|
||||
var thunk int32
|
||||
|
||||
var Funcdepth int32
|
||||
|
||||
var typecheckok bool
|
||||
|
|
|
|||
|
|
@ -2311,6 +2311,6 @@ func fixlbrace(lbr int) {
|
|||
// set up for another one now that we're done.
|
||||
// See comment in lex.C about loophack.
|
||||
if lbr == LBODY {
|
||||
loophack = 1
|
||||
loophack = true
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -87,7 +87,9 @@ func Gbranch(as int, t *Type, likely int) *obj.Prog {
|
|||
p.To.Val = nil
|
||||
if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' {
|
||||
p.From.Type = obj.TYPE_CONST
|
||||
p.From.Offset = int64(obj.Bool2int(likely > 0))
|
||||
if likely > 0 {
|
||||
p.From.Offset = 1
|
||||
}
|
||||
}
|
||||
|
||||
if Debug['g'] != 0 {
|
||||
|
|
@ -576,9 +578,7 @@ fp:
|
|||
n.Op = OINDREG
|
||||
|
||||
n.Reg = int16(Thearch.REGSP)
|
||||
if HasLinkRegister() {
|
||||
n.Xoffset += int64(Ctxt.Arch.Ptrsize)
|
||||
}
|
||||
n.Xoffset += Ctxt.FixedFrameSize()
|
||||
|
||||
case 1: // input arg
|
||||
n.Class = PPARAM
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ var yyprev int
|
|||
|
||||
var yylast int
|
||||
|
||||
var imported_unsafe int
|
||||
var imported_unsafe bool
|
||||
|
||||
var (
|
||||
goos string
|
||||
|
|
@ -60,26 +60,6 @@ var debugtab = []struct {
|
|||
{"wb", &Debug_wb}, // print information about write barriers
|
||||
}
|
||||
|
||||
// Our own isdigit, isspace, isalpha, isalnum that take care
|
||||
// of EOF and other out of range arguments.
|
||||
func yy_isdigit(c int) bool {
|
||||
return c >= 0 && c <= 0xFF && isdigit(c)
|
||||
}
|
||||
|
||||
func yy_isspace(c int) bool {
|
||||
return c == ' ' || c == '\t' || c == '\n' || c == '\r'
|
||||
}
|
||||
|
||||
func yy_isalpha(c int) bool {
|
||||
return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z'
|
||||
}
|
||||
|
||||
func yy_isalnum(c int) bool {
|
||||
return c >= 0 && c <= 0xFF && isalnum(c)
|
||||
}
|
||||
|
||||
// Disallow use of isdigit etc.
|
||||
|
||||
const (
|
||||
EOF = -1
|
||||
)
|
||||
|
|
@ -334,8 +314,8 @@ func Main() {
|
|||
|
||||
curio.peekc = 0
|
||||
curio.peekc1 = 0
|
||||
curio.nlsemi = 0
|
||||
curio.eofnl = 0
|
||||
curio.nlsemi = false
|
||||
curio.eofnl = false
|
||||
curio.last = 0
|
||||
|
||||
// Skip initial BOM if present.
|
||||
|
|
@ -346,7 +326,7 @@ func Main() {
|
|||
block = 1
|
||||
iota_ = -1000000
|
||||
|
||||
imported_unsafe = 0
|
||||
imported_unsafe = false
|
||||
|
||||
yyparse()
|
||||
if nsyntaxerrors != 0 {
|
||||
|
|
@ -484,9 +464,9 @@ func Main() {
|
|||
}
|
||||
|
||||
// Phase 9: Check external declarations.
|
||||
for l := externdcl; l != nil; l = l.Next {
|
||||
if l.N.Op == ONAME {
|
||||
typecheck(&l.N, Erv)
|
||||
for i, n := range externdcl {
|
||||
if n.Op == ONAME {
|
||||
typecheck(&externdcl[i], Erv)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -587,7 +567,7 @@ func addidir(dir string) {
|
|||
// is this path a local name? begins with ./ or ../ or /
|
||||
func islocalname(name string) bool {
|
||||
return strings.HasPrefix(name, "/") ||
|
||||
Ctxt.Windows != 0 && len(name) >= 3 && yy_isalpha(int(name[0])) && name[1] == ':' && name[2] == '/' ||
|
||||
Ctxt.Windows != 0 && len(name) >= 3 && isAlpha(int(name[0])) && name[1] == ':' && name[2] == '/' ||
|
||||
strings.HasPrefix(name, "./") || name == "." ||
|
||||
strings.HasPrefix(name, "../") || name == ".."
|
||||
}
|
||||
|
|
@ -615,9 +595,7 @@ func findpkg(name string) (file string, ok bool) {
|
|||
// local imports should be canonicalized already.
|
||||
// don't want to see "encoding/../encoding/base64"
|
||||
// as different from "encoding/base64".
|
||||
var q string
|
||||
_ = q
|
||||
if path.Clean(name) != name {
|
||||
if q := path.Clean(name); q != name {
|
||||
Yyerror("non-canonical import path %q (should be %q)", name, q)
|
||||
return "", false
|
||||
}
|
||||
|
|
@ -702,7 +680,7 @@ func importfile(f *Val, line int) {
|
|||
|
||||
importpkg = mkpkg(f.U.(string))
|
||||
cannedimports("unsafe.o", unsafeimport)
|
||||
imported_unsafe = 1
|
||||
imported_unsafe = true
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -803,19 +781,18 @@ func importfile(f *Val, line int) {
|
|||
curio.peekc = 0
|
||||
curio.peekc1 = 0
|
||||
curio.infile = file
|
||||
curio.nlsemi = 0
|
||||
curio.nlsemi = false
|
||||
typecheckok = true
|
||||
|
||||
var c int32
|
||||
for {
|
||||
c = int32(getc())
|
||||
c := getc()
|
||||
if c == EOF {
|
||||
break
|
||||
}
|
||||
if c != '$' {
|
||||
continue
|
||||
}
|
||||
c = int32(getc())
|
||||
c = getc()
|
||||
if c == EOF {
|
||||
break
|
||||
}
|
||||
|
|
@@ -854,17 +831,44 @@ func cannedimports(file string, cp string) {
	curio.peekc1 = 0
	curio.infile = file
	curio.cp = cp
	curio.nlsemi = 0
	curio.nlsemi = false
	curio.importsafe = false

	typecheckok = true
	incannedimport = 1
}

func isSpace(c int) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}

func isAlpha(c int) bool {
	return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
}

func isDigit(c int) bool {
	return '0' <= c && c <= '9'
}
func isAlnum(c int) bool {
	return isAlpha(c) || isDigit(c)
}

func plan9quote(s string) string {
	if s == "" {
		return "''"
	}
	for _, c := range s {
		if c <= ' ' || c == '\'' {
			return "'" + strings.Replace(s, "'", "''", -1) + "'"
		}
	}
	return s
}
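For reference, a runnable copy of plan9quote exactly as shown above, with a few sample inputs and the outputs its rules produce (standalone sketch):

package main

import (
	"fmt"
	"strings"
)

func plan9quote(s string) string {
	if s == "" {
		return "''"
	}
	for _, c := range s {
		if c <= ' ' || c == '\'' {
			return "'" + strings.Replace(s, "'", "''", -1) + "'"
		}
	}
	return s
}

func main() {
	fmt.Println(plan9quote("hello")) // hello    (no quoting needed)
	fmt.Println(plan9quote("a b"))   // 'a b'    (space forces quoting)
	fmt.Println(plan9quote("it's"))  // 'it''s'  (embedded quote is doubled)
	fmt.Println(plan9quote(""))      // ''       (empty string)
}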
||||
func isfrog(c int) bool {
|
||||
// complain about possibly invisible control characters
|
||||
if c < ' ' {
|
||||
return !yy_isspace(c) // exclude good white space
|
||||
return !isSpace(c) // exclude good white space
|
||||
}
|
||||
|
||||
if 0x7f <= c && c <= 0xa0 { // DEL, unicode block including unbreakable space.
|
||||
|
|
@ -874,8 +878,8 @@ func isfrog(c int) bool {
|
|||
}
|
||||
|
||||
type Loophack struct {
|
||||
v int
|
||||
next *Loophack
|
||||
v bool
|
||||
}
|
||||
|
||||
var _yylex_lstk *Loophack
|
||||
|
|
@ -885,7 +889,6 @@ func _yylex(yylval *yySymType) int32 {
|
|||
var escflag int
|
||||
var v int64
|
||||
var cp *bytes.Buffer
|
||||
var rune_ uint
|
||||
var s *Sym
|
||||
var h *Loophack
|
||||
var str string
|
||||
|
|
@ -894,8 +897,8 @@ func _yylex(yylval *yySymType) int32 {
|
|||
|
||||
l0:
|
||||
c := getc()
|
||||
if yy_isspace(c) {
|
||||
if c == '\n' && curio.nlsemi != 0 {
|
||||
if isSpace(c) {
|
||||
if c == '\n' && curio.nlsemi {
|
||||
ungetc(c)
|
||||
if Debug['x'] != 0 {
|
||||
fmt.Printf("lex: implicit semi\n")
|
||||
|
|
@ -916,20 +919,20 @@ l0:
|
|||
goto talph
|
||||
}
|
||||
|
||||
if yy_isalpha(c) {
|
||||
if isAlpha(c) {
|
||||
cp = &lexbuf
|
||||
cp.Reset()
|
||||
goto talph
|
||||
}
|
||||
|
||||
if yy_isdigit(c) {
|
||||
if isDigit(c) {
|
||||
cp = &lexbuf
|
||||
cp.Reset()
|
||||
if c != '0' {
|
||||
for {
|
||||
cp.WriteByte(byte(c))
|
||||
c = getc()
|
||||
if yy_isdigit(c) {
|
||||
if isDigit(c) {
|
||||
continue
|
||||
}
|
||||
if c == '.' {
|
||||
|
|
@ -951,7 +954,7 @@ l0:
|
|||
for {
|
||||
cp.WriteByte(byte(c))
|
||||
c = getc()
|
||||
if yy_isdigit(c) {
|
||||
if isDigit(c) {
|
||||
continue
|
||||
}
|
||||
if c >= 'a' && c <= 'f' {
|
||||
|
|
@ -976,7 +979,7 @@ l0:
|
|||
|
||||
c1 = 0
|
||||
for {
|
||||
if !yy_isdigit(c) {
|
||||
if !isDigit(c) {
|
||||
break
|
||||
}
|
||||
if c < '0' || c > '7' {
|
||||
|
|
@ -1014,7 +1017,7 @@ l0:
|
|||
|
||||
case '.':
|
||||
c1 = getc()
|
||||
if yy_isdigit(c1) {
|
||||
if isDigit(c1) {
|
||||
cp = &lexbuf
|
||||
cp.Reset()
|
||||
cp.WriteByte(byte(c))
|
||||
|
|
@ -1048,8 +1051,7 @@ l0:
|
|||
if v < utf8.RuneSelf || escflag != 0 {
|
||||
cp.WriteByte(byte(v))
|
||||
} else {
|
||||
rune_ = uint(v)
|
||||
cp.WriteRune(rune(rune_))
|
||||
cp.WriteRune(rune(v))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1106,23 +1108,23 @@ l0:
|
|||
case '/':
|
||||
c1 = getc()
|
||||
if c1 == '*' {
|
||||
nl := 0
|
||||
nl := false
|
||||
for {
|
||||
c = int(getr())
|
||||
if c == '\n' {
|
||||
nl = 1
|
||||
nl = true
|
||||
}
|
||||
for c == '*' {
|
||||
c = int(getr())
|
||||
if c == '/' {
|
||||
if nl != 0 {
|
||||
if nl {
|
||||
ungetc('\n')
|
||||
}
|
||||
goto l0
|
||||
}
|
||||
|
||||
if c == '\n' {
|
||||
nl = 1
|
||||
nl = true
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -1308,15 +1310,15 @@ l0:
 *
 * when we see the keyword, the next
 * non-parenthesized '{' becomes an LBODY.
 * loophack is normally 0.
 * a keyword makes it go up to 1.
 * parens push loophack onto a stack and go back to 0.
 * a '{' with loophack == 1 becomes LBODY and disables loophack.
 * loophack is normally false.
 * a keyword sets it to true.
 * parens push loophack onto a stack and go back to false.
 * a '{' with loophack == true becomes LBODY and disables loophack.
 *
 * i said it was clumsy.
 */
	case '(', '[':
		if loophack != 0 || _yylex_lstk != nil {
		if loophack || _yylex_lstk != nil {
			h = new(Loophack)
			if h == nil {
				Flusherrors()

@@ -1327,7 +1329,7 @@ l0:
			h.v = loophack
			h.next = _yylex_lstk
			_yylex_lstk = h
			loophack = 0
			loophack = false
		}

		goto lx

@@ -1342,11 +1344,11 @@ l0:
		goto lx

	case '{':
		if loophack == 1 {
		if loophack {
			if Debug['x'] != 0 {
				fmt.Printf("%v lex: LBODY\n", Ctxt.Line(int(lexlineno)))
			}
			loophack = 0
			loophack = false
			return LBODY
		}
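A standalone model of the loophack bookkeeping described in the comment above (illustrative only; the names and the simplified "always push on '('" rule are inventions for this sketch, not the lexer's actual code):

package main

import "fmt"

type loopState struct {
	hack  bool
	stack []bool
}

func (s *loopState) keyword()   { s.hack = true }                              // for/if/switch/select
func (s *loopState) openParen() { s.stack = append(s.stack, s.hack); s.hack = false } // '(' or '['
func (s *loopState) closeParen() { // ')' or ']' restores the saved flag
	if n := len(s.stack); n > 0 {
		s.hack = s.stack[n-1]
		s.stack = s.stack[:n-1]
	}
}
func (s *loopState) openBrace() string { // '{' becomes LBODY only when the flag is set
	if s.hack {
		s.hack = false
		return "LBODY"
	}
	return "{"
}

func main() {
	var s loopState
	s.keyword()                // "if" sets the flag
	s.openParen()              // '(' pushes the flag and clears it
	fmt.Println(s.openBrace()) // "{": a brace inside parentheses is ordinary
	s.closeParen()             // ')' restores the flag
	fmt.Println(s.openBrace()) // "LBODY": the next non-parenthesized '{'
}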
@ -1395,14 +1397,14 @@ talph:
|
|||
for {
|
||||
if c >= utf8.RuneSelf {
|
||||
ungetc(c)
|
||||
rune_ = uint(getr())
|
||||
r := rune(getr())
|
||||
|
||||
// 0xb7 · is used for internal names
|
||||
if !unicode.IsLetter(rune(rune_)) && !unicode.IsDigit(rune(rune_)) && (importpkg == nil || rune_ != 0xb7) {
|
||||
Yyerror("invalid identifier character U+%04x", rune_)
|
||||
if !unicode.IsLetter(r) && !unicode.IsDigit(r) && (importpkg == nil || r != 0xb7) {
|
||||
Yyerror("invalid identifier character U+%04x", r)
|
||||
}
|
||||
cp.WriteRune(rune(rune_))
|
||||
} else if !yy_isalnum(c) && c != '_' {
|
||||
cp.WriteRune(r)
|
||||
} else if !isAlnum(c) && c != '_' {
|
||||
break
|
||||
} else {
|
||||
cp.WriteByte(byte(c))
|
||||
|
|
@ -1419,7 +1421,7 @@ talph:
|
|||
goto l0
|
||||
|
||||
case LFOR, LIF, LSWITCH, LSELECT:
|
||||
loophack = 1 // see comment about loophack above
|
||||
loophack = true // see comment about loophack above
|
||||
}
|
||||
|
||||
if Debug['x'] != 0 {
|
||||
|
|
@ -1450,7 +1452,7 @@ casedot:
|
|||
for {
|
||||
cp.WriteByte(byte(c))
|
||||
c = getc()
|
||||
if !yy_isdigit(c) {
|
||||
if !isDigit(c) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
|
@ -1475,10 +1477,10 @@ caseep:
|
|||
c = getc()
|
||||
}
|
||||
|
||||
if !yy_isdigit(c) {
|
||||
if !isDigit(c) {
|
||||
Yyerror("malformed floating point constant exponent")
|
||||
}
|
||||
for yy_isdigit(c) {
|
||||
for isDigit(c) {
|
||||
cp.WriteByte(byte(c))
|
||||
c = getc()
|
||||
}
|
||||
|
|
@ -1548,7 +1550,7 @@ func internString(b []byte) string {
|
|||
|
||||
func more(pp *string) bool {
|
||||
p := *pp
|
||||
for p != "" && yy_isspace(int(p[0])) {
|
||||
for p != "" && isSpace(int(p[0])) {
|
||||
p = p[1:]
|
||||
}
|
||||
*pp = p
|
||||
|
|
@ -1594,7 +1596,7 @@ func getlinepragma() int {
|
|||
}
|
||||
|
||||
if verb == "go:linkname" {
|
||||
if imported_unsafe == 0 {
|
||||
if !imported_unsafe {
|
||||
Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
|
||||
}
|
||||
f := strings.Fields(cmd)
|
||||
|
|
@ -1711,7 +1713,7 @@ func getimpsym(pp *string) string {
|
|||
return ""
|
||||
}
|
||||
i := 0
|
||||
for i < len(p) && !yy_isspace(int(p[i])) && p[i] != '"' {
|
||||
for i < len(p) && !isSpace(int(p[i])) && p[i] != '"' {
|
||||
i++
|
||||
}
|
||||
sym := p[:i]
|
||||
|
|
@ -1746,9 +1748,7 @@ func pragcgo(text string) {
|
|||
verb := text[3:] // skip "go:"
|
||||
|
||||
if verb == "cgo_dynamic_linker" || verb == "dynlinker" {
|
||||
var ok bool
|
||||
var p string
|
||||
p, ok = getquoted(&q)
|
||||
p, ok := getquoted(&q)
|
||||
if !ok {
|
||||
Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
|
||||
return
|
||||
|
|
@ -1830,9 +1830,7 @@ func pragcgo(text string) {
|
|||
}
|
||||
|
||||
if verb == "cgo_ldflag" {
|
||||
var ok bool
|
||||
var p string
|
||||
p, ok = getquoted(&q)
|
||||
p, ok := getquoted(&q)
|
||||
if !ok {
|
||||
Yyerror("usage: //go:cgo_ldflag \"arg\"")
|
||||
return
|
||||
|
|
@ -1866,7 +1864,7 @@ func yyparse() {
|
|||
func yylex(yylval *yySymType) int32 {
|
||||
lx := int(_yylex(yylval))
|
||||
|
||||
if curio.nlsemi != 0 && lx == EOF {
|
||||
if curio.nlsemi && lx == EOF {
|
||||
// Treat EOF as "end of line" for the purposes
|
||||
// of inserting a semicolon.
|
||||
lx = ';'
|
||||
|
|
@ -1884,10 +1882,10 @@ func yylex(yylval *yySymType) int32 {
|
|||
')',
|
||||
'}',
|
||||
']':
|
||||
curio.nlsemi = 1
|
||||
curio.nlsemi = true
|
||||
|
||||
default:
|
||||
curio.nlsemi = 0
|
||||
curio.nlsemi = false
|
||||
}
|
||||
|
||||
// Track last two tokens returned by yylex.
|
||||
|
|
@ -1942,10 +1940,10 @@ check:
|
|||
|
||||
// insert \n at EOF
|
||||
case EOF:
|
||||
if curio.eofnl != 0 || curio.last == '\n' {
|
||||
if curio.eofnl || curio.last == '\n' {
|
||||
return EOF
|
||||
}
|
||||
curio.eofnl = 1
|
||||
curio.eofnl = true
|
||||
c = '\n'
|
||||
fallthrough
|
||||
|
||||
|
|
@ -2189,32 +2187,22 @@ var syms = []struct {
|
|||
{"insofaras", LIGNORE, Txxx, OXXX},
|
||||
}
|
||||
|
||||
// lexinit initializes known symbols and the basic types.
|
||||
func lexinit() {
|
||||
var lex int
|
||||
var s *Sym
|
||||
var s1 *Sym
|
||||
var t *Type
|
||||
var etype int
|
||||
for _, s := range syms {
|
||||
lex := s.lexical
|
||||
s1 := Lookup(s.name)
|
||||
s1.Lexical = uint16(lex)
|
||||
|
||||
/*
|
||||
* initialize basic types array
|
||||
* initialize known symbols
|
||||
*/
|
||||
for i := 0; i < len(syms); i++ {
|
||||
lex = syms[i].lexical
|
||||
s = Lookup(syms[i].name)
|
||||
s.Lexical = uint16(lex)
|
||||
|
||||
etype = syms[i].etype
|
||||
if etype != Txxx {
|
||||
if etype := s.etype; etype != Txxx {
|
||||
if etype < 0 || etype >= len(Types) {
|
||||
Fatalf("lexinit: %s bad etype", s.Name)
|
||||
Fatalf("lexinit: %s bad etype", s.name)
|
||||
}
|
||||
s1 = Pkglookup(syms[i].name, builtinpkg)
|
||||
t = Types[etype]
|
||||
s2 := Pkglookup(s.name, builtinpkg)
|
||||
t := Types[etype]
|
||||
if t == nil {
|
||||
t = typ(etype)
|
||||
t.Sym = s1
|
||||
t.Sym = s2
|
||||
|
||||
if etype != TANY && etype != TSTRING {
|
||||
dowidth(t)
|
||||
|
|
@ -2222,19 +2210,18 @@ func lexinit() {
|
|||
Types[etype] = t
|
||||
}
|
||||
|
||||
s1.Lexical = LNAME
|
||||
s1.Def = typenod(t)
|
||||
s1.Def.Name = new(Name)
|
||||
s2.Lexical = LNAME
|
||||
s2.Def = typenod(t)
|
||||
s2.Def.Name = new(Name)
|
||||
continue
|
||||
}
|
||||
|
||||
etype = syms[i].op
|
||||
if etype != OXXX {
|
||||
s1 = Pkglookup(syms[i].name, builtinpkg)
|
||||
s1.Lexical = LNAME
|
||||
s1.Def = Nod(ONAME, nil, nil)
|
||||
s1.Def.Sym = s1
|
||||
s1.Def.Etype = uint8(etype)
|
||||
if etype := s.op; etype != OXXX {
|
||||
s2 := Pkglookup(s.name, builtinpkg)
|
||||
s2.Lexical = LNAME
|
||||
s2.Def = Nod(ONAME, nil, nil)
|
||||
s2.Def.Sym = s2
|
||||
s2.Def.Etype = uint8(etype)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2247,7 +2234,7 @@ func lexinit() {
|
|||
|
||||
idealbool = typ(TBOOL)
|
||||
|
||||
s = Pkglookup("true", builtinpkg)
|
||||
s := Pkglookup("true", builtinpkg)
|
||||
s.Def = Nodbool(true)
|
||||
s.Def.Sym = Lookup("true")
|
||||
s.Def.Name = new(Name)
|
||||
|
|
@ -2446,136 +2433,121 @@ func lexfini() {
|
|||
nodfp.Sym = Lookup(".fp")
|
||||
}
|
||||
|
||||
var lexn = []struct {
|
||||
lex int
|
||||
name string
|
||||
}{
|
||||
{LANDAND, "ANDAND"},
|
||||
{LANDNOT, "ANDNOT"},
|
||||
{LASOP, "ASOP"},
|
||||
{LBREAK, "BREAK"},
|
||||
{LCASE, "CASE"},
|
||||
{LCHAN, "CHAN"},
|
||||
{LCOLAS, "COLAS"},
|
||||
{LCOMM, "<-"},
|
||||
{LCONST, "CONST"},
|
||||
{LCONTINUE, "CONTINUE"},
|
||||
{LDDD, "..."},
|
||||
{LDEC, "DEC"},
|
||||
{LDEFAULT, "DEFAULT"},
|
||||
{LDEFER, "DEFER"},
|
||||
{LELSE, "ELSE"},
|
||||
{LEQ, "EQ"},
|
||||
{LFALL, "FALL"},
|
||||
{LFOR, "FOR"},
|
||||
{LFUNC, "FUNC"},
|
||||
{LGE, "GE"},
|
||||
{LGO, "GO"},
|
||||
{LGOTO, "GOTO"},
|
||||
{LGT, "GT"},
|
||||
{LIF, "IF"},
|
||||
{LIMPORT, "IMPORT"},
|
||||
{LINC, "INC"},
|
||||
{LINTERFACE, "INTERFACE"},
|
||||
{LLE, "LE"},
|
||||
{LLITERAL, "LITERAL"},
|
||||
{LLSH, "LSH"},
|
||||
{LLT, "LT"},
|
||||
{LMAP, "MAP"},
|
||||
{LNAME, "NAME"},
|
||||
{LNE, "NE"},
|
||||
{LOROR, "OROR"},
|
||||
{LPACKAGE, "PACKAGE"},
|
||||
{LRANGE, "RANGE"},
|
||||
{LRETURN, "RETURN"},
|
||||
{LRSH, "RSH"},
|
||||
{LSELECT, "SELECT"},
|
||||
{LSTRUCT, "STRUCT"},
|
||||
{LSWITCH, "SWITCH"},
|
||||
{LTYPE, "TYPE"},
|
||||
{LVAR, "VAR"},
|
||||
var lexn = map[int]string{
|
||||
LANDAND: "ANDAND",
|
||||
LANDNOT: "ANDNOT",
|
||||
LASOP: "ASOP",
|
||||
LBREAK: "BREAK",
|
||||
LCASE: "CASE",
|
||||
LCHAN: "CHAN",
|
||||
LCOLAS: "COLAS",
|
||||
LCOMM: "<-",
|
||||
LCONST: "CONST",
|
||||
LCONTINUE: "CONTINUE",
|
||||
LDDD: "...",
|
||||
LDEC: "DEC",
|
||||
LDEFAULT: "DEFAULT",
|
||||
LDEFER: "DEFER",
|
||||
LELSE: "ELSE",
|
||||
LEQ: "EQ",
|
||||
LFALL: "FALL",
|
||||
LFOR: "FOR",
|
||||
LFUNC: "FUNC",
|
||||
LGE: "GE",
|
||||
LGO: "GO",
|
||||
LGOTO: "GOTO",
|
||||
LGT: "GT",
|
||||
LIF: "IF",
|
||||
LIMPORT: "IMPORT",
|
||||
LINC: "INC",
|
||||
LINTERFACE: "INTERFACE",
|
||||
LLE: "LE",
|
||||
LLITERAL: "LITERAL",
|
||||
LLSH: "LSH",
|
||||
LLT: "LT",
|
||||
LMAP: "MAP",
|
||||
LNAME: "NAME",
|
||||
LNE: "NE",
|
||||
LOROR: "OROR",
|
||||
LPACKAGE: "PACKAGE",
|
||||
LRANGE: "RANGE",
|
||||
LRETURN: "RETURN",
|
||||
LRSH: "RSH",
|
||||
LSELECT: "SELECT",
|
||||
LSTRUCT: "STRUCT",
|
||||
LSWITCH: "SWITCH",
|
||||
LTYPE: "TYPE",
|
||||
LVAR: "VAR",
|
||||
}
|
||||
|
||||
func lexname(lex int) string {
|
||||
for i := 0; i < len(lexn); i++ {
|
||||
if lexn[i].lex == lex {
|
||||
return lexn[i].name
|
||||
}
|
||||
if s, ok := lexn[lex]; ok {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("LEX-%d", lex)
|
||||
}
|
||||
|
||||
var yytfix = []struct {
|
||||
have string
|
||||
want string
|
||||
}{
|
||||
{"$end", "EOF"},
|
||||
{"LASOP", "op="},
|
||||
{"LBREAK", "break"},
|
||||
{"LCASE", "case"},
|
||||
{"LCHAN", "chan"},
|
||||
{"LCOLAS", ":="},
|
||||
{"LCONST", "const"},
|
||||
{"LCONTINUE", "continue"},
|
||||
{"LDDD", "..."},
|
||||
{"LDEFAULT", "default"},
|
||||
{"LDEFER", "defer"},
|
||||
{"LELSE", "else"},
|
||||
{"LFALL", "fallthrough"},
|
||||
{"LFOR", "for"},
|
||||
{"LFUNC", "func"},
|
||||
{"LGO", "go"},
|
||||
{"LGOTO", "goto"},
|
||||
{"LIF", "if"},
|
||||
{"LIMPORT", "import"},
|
||||
{"LINTERFACE", "interface"},
|
||||
{"LMAP", "map"},
|
||||
{"LNAME", "name"},
|
||||
{"LPACKAGE", "package"},
|
||||
{"LRANGE", "range"},
|
||||
{"LRETURN", "return"},
|
||||
{"LSELECT", "select"},
|
||||
{"LSTRUCT", "struct"},
|
||||
{"LSWITCH", "switch"},
|
||||
{"LTYPE", "type"},
|
||||
{"LVAR", "var"},
|
||||
{"LANDAND", "&&"},
|
||||
{"LANDNOT", "&^"},
|
||||
{"LBODY", "{"},
|
||||
{"LCOMM", "<-"},
|
||||
{"LDEC", "--"},
|
||||
{"LINC", "++"},
|
||||
{"LEQ", "=="},
|
||||
{"LGE", ">="},
|
||||
{"LGT", ">"},
|
||||
{"LLE", "<="},
|
||||
{"LLT", "<"},
|
||||
{"LLSH", "<<"},
|
||||
{"LRSH", ">>"},
|
||||
{"LOROR", "||"},
|
||||
{"LNE", "!="},
|
||||
var yytfix = map[string]string{
|
||||
"$end": "EOF",
|
||||
"LASOP": "op=",
|
||||
"LBREAK": "break",
|
||||
"LCASE": "case",
|
||||
"LCHAN": "chan",
|
||||
"LCOLAS": ":=",
|
||||
"LCONST": "const",
|
||||
"LCONTINUE": "continue",
|
||||
"LDDD": "...",
|
||||
"LDEFAULT": "default",
|
||||
"LDEFER": "defer",
|
||||
"LELSE": "else",
|
||||
"LFALL": "fallthrough",
|
||||
"LFOR": "for",
|
||||
"LFUNC": "func",
|
||||
"LGO": "go",
|
||||
"LGOTO": "goto",
|
||||
"LIF": "if",
|
||||
"LIMPORT": "import",
|
||||
"LINTERFACE": "interface",
|
||||
"LMAP": "map",
|
||||
"LNAME": "name",
|
||||
"LPACKAGE": "package",
|
||||
"LRANGE": "range",
|
||||
"LRETURN": "return",
|
||||
"LSELECT": "select",
|
||||
"LSTRUCT": "struct",
|
||||
"LSWITCH": "switch",
|
||||
"LTYPE": "type",
|
||||
"LVAR": "var",
|
||||
"LANDAND": "&&",
|
||||
"LANDNOT": "&^",
|
||||
"LBODY": "{",
|
||||
"LCOMM": "<-",
|
||||
"LDEC": "--",
|
||||
"LINC": "++",
|
||||
"LEQ": "==",
|
||||
"LGE": ">=",
|
||||
"LGT": ">",
|
||||
"LLE": "<=",
|
||||
"LLT": "<",
|
||||
"LLSH": "<<",
|
||||
"LRSH": ">>",
|
||||
"LOROR": "||",
|
||||
"LNE": "!=",
|
||||
// spell out to avoid confusion with punctuation in error messages
|
||||
{"';'", "semicolon or newline"},
|
||||
{"','", "comma"},
|
||||
"';'": "semicolon or newline",
|
||||
"','": "comma",
|
||||
}
|
||||
|
||||
func init() {
|
||||
yyErrorVerbose = true
|
||||
|
||||
Outer:
|
||||
for i, s := range yyToknames {
|
||||
// Apply yytfix if possible.
|
||||
for _, fix := range yytfix {
|
||||
if s == fix.have {
|
||||
yyToknames[i] = fix.want
|
||||
continue Outer
|
||||
}
|
||||
}
|
||||
|
||||
if fix, ok := yytfix[s]; ok {
|
||||
yyToknames[i] = fix
|
||||
} else if len(s) == 3 && s[0] == '\'' && s[2] == '\'' {
|
||||
// Turn 'x' into x.
|
||||
if len(s) == 3 && s[0] == '\'' && s[2] == '\'' {
|
||||
yyToknames[i] = s[1:2]
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,7 +9,6 @@ import (
|
|||
"cmd/internal/obj"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/// implements float arihmetic
|
||||
|
|
@ -154,30 +153,6 @@ func mpatoflt(a *Mpflt, as string) {
|
|||
as = as[1:]
|
||||
}
|
||||
|
||||
// The spec requires accepting exponents that fit in int32.
|
||||
// Don't accept much more than that.
|
||||
// Count digits in exponent and stop early if there are too many.
|
||||
if i := strings.Index(as, "e"); i >= 0 {
|
||||
i++
|
||||
if i < len(as) && (as[i] == '-' || as[i] == '+') {
|
||||
i++
|
||||
}
|
||||
for i < len(as) && as[i] == '0' {
|
||||
i++
|
||||
}
|
||||
// TODO(rsc): This should be > 10, because we're supposed
|
||||
// to accept any signed 32-bit int as an exponent.
|
||||
// But that's not working terribly well, so we deviate from the
|
||||
// spec in order to make sure that what we accept works.
|
||||
// We can remove this restriction once those larger exponents work.
|
||||
// See golang.org/issue/11326 and test/fixedbugs/issue11326*.go.
|
||||
if len(as)-i > 8 {
|
||||
Yyerror("malformed constant: %s (exponent too large)", as)
|
||||
a.Val.SetUint64(0)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
f, ok := a.Val.SetString(as)
|
||||
if !ok {
|
||||
// At the moment we lose precise error cause;
|
||||
|
|
|
|||
|
|
@ -72,10 +72,7 @@ func dumpobj() {
|
|||
|
||||
fmt.Fprintf(bout, "\n!\n")
|
||||
|
||||
var externs *NodeList
|
||||
if externdcl != nil {
|
||||
externs = externdcl.End
|
||||
}
|
||||
externs := len(externdcl)
|
||||
|
||||
dumpglobls()
|
||||
dumptypestructs()
|
||||
|
|
@ -83,8 +80,8 @@ func dumpobj() {
|
|||
// Dump extra globals.
|
||||
tmp := externdcl
|
||||
|
||||
if externs != nil {
|
||||
externdcl = externs.Next
|
||||
if externdcl != nil {
|
||||
externdcl = externdcl[externs:]
|
||||
}
|
||||
dumpglobls()
|
||||
externdcl = tmp
|
||||
|
|
@ -107,11 +104,8 @@ func dumpobj() {
|
|||
}
|
||||
|
||||
func dumpglobls() {
|
||||
var n *Node
|
||||
|
||||
// add globals
|
||||
for l := externdcl; l != nil; l = l.Next {
|
||||
n = l.N
|
||||
for _, n := range externdcl {
|
||||
if n.Op != ONAME {
|
||||
continue
|
||||
}
|
||||
|
|
@ -126,12 +120,10 @@ func dumpglobls() {
|
|||
continue
|
||||
}
|
||||
dowidth(n.Type)
|
||||
|
||||
ggloblnod(n)
|
||||
}
|
||||
|
||||
for l := funcsyms; l != nil; l = l.Next {
|
||||
n = l.N
|
||||
for _, n := range funcsyms {
|
||||
dsymptr(n.Sym, 0, n.Sym.Def.Func.Shortname.Sym, 0)
|
||||
ggloblsym(n.Sym, int32(Widthptr), obj.DUPOK|obj.RODATA)
|
||||
}
|
||||
|
|
@ -187,10 +179,6 @@ func duint32(s *Sym, off int, v uint32) int {
|
|||
return duintxx(s, off, uint64(v), 4)
|
||||
}
|
||||
|
||||
func duint64(s *Sym, off int, v uint64) int {
|
||||
return duintxx(s, off, v, 8)
|
||||
}
|
||||
|
||||
func duintptr(s *Sym, off int, v uint64) int {
|
||||
return duintxx(s, off, v, Widthptr)
|
||||
}
|
||||
|
|
@ -284,25 +272,6 @@ func slicebytes(nam *Node, s string, len int) {
|
|||
duintxx(nam.Sym, off, uint64(len), Widthint)
|
||||
}
|
||||
|
||||
func dstringptr(s *Sym, off int, str string) int {
|
||||
off = int(Rnd(int64(off), int64(Widthptr)))
|
||||
p := Thearch.Gins(obj.ADATA, nil, nil)
|
||||
p.From.Type = obj.TYPE_MEM
|
||||
p.From.Name = obj.NAME_EXTERN
|
||||
p.From.Sym = Linksym(s)
|
||||
p.From.Offset = int64(off)
|
||||
p.From3 = new(obj.Addr)
|
||||
p.From3.Type = obj.TYPE_CONST
|
||||
p.From3.Offset = int64(Widthptr)
|
||||
|
||||
Datastring(str+"\x00", &p.To) // TODO(rsc): Remove NUL
|
||||
p.To.Type = obj.TYPE_ADDR
|
||||
p.To.Etype = Simtype[TINT]
|
||||
off += Widthptr
|
||||
|
||||
return off
|
||||
}
|
||||
|
||||
func Datastring(s string, a *obj.Addr) {
|
||||
_, symdata := stringsym(s)
|
||||
a.Type = obj.TYPE_MEM
|
||||
|
|
|
|||
|
|
@@ -165,6 +165,8 @@ func emitptrargsmap() {
	ggloblsym(sym, int32(off), obj.RODATA|obj.LOCAL)
}

// cmpstackvarlt reports whether the stack variable a sorts before b.
//
// Sort the list of stack variables. Autos after anything else,
// within autos, unused after used, within used, things with
// pointers first, zeroed things first, and then decreasing size.
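To make the tiered ordering described above (and spelled out by cmpstackvarlt in the following hunk) easier to follow, an illustrative sort.Slice sketch over a toy struct; the field names are invented and the non-auto offset tier is omitted:

package main

import (
	"fmt"
	"sort"
)

type stackVar struct {
	auto, used, pointers, needzero bool
	size                           int64
	name                           string
}

// less mirrors the tiers in the comment above for autos only.
func less(a, b stackVar) bool {
	if a.auto != b.auto {
		return !a.auto // non-autos sort before autos
	}
	if a.used != b.used {
		return a.used // used before unused
	}
	if a.pointers != b.pointers {
		return a.pointers // things with pointers first
	}
	if a.needzero != b.needzero {
		return a.needzero // zeroed things first
	}
	if a.size != b.size {
		return a.size > b.size // then decreasing size
	}
	return a.name < b.name // finally by name, for a stable total order
}

func main() {
	vs := []stackVar{
		{auto: true, size: 8, name: "b"},
		{auto: true, used: true, size: 4, name: "a"},
	}
	sort.Slice(vs, func(i, j int) bool { return less(vs[i], vs[j]) })
	fmt.Println(vs[0].name, vs[1].name) // a b: the used auto sorts first
}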
||||
|
|
@ -173,48 +175,48 @@ func emitptrargsmap() {
|
|||
// really means, in memory, things with pointers needing zeroing at
|
||||
// the top of the stack and increasing in size.
|
||||
// Non-autos sort on offset.
|
||||
func cmpstackvar(a *Node, b *Node) int {
|
||||
func cmpstackvarlt(a, b *Node) bool {
|
||||
if a.Class != b.Class {
|
||||
if a.Class == PAUTO {
|
||||
return +1
|
||||
return false
|
||||
}
|
||||
return -1
|
||||
return true
|
||||
}
|
||||
|
||||
if a.Class != PAUTO {
|
||||
if a.Xoffset < b.Xoffset {
|
||||
return -1
|
||||
return true
|
||||
}
|
||||
if a.Xoffset > b.Xoffset {
|
||||
return +1
|
||||
return false
|
||||
}
|
||||
return 0
|
||||
return false
|
||||
}
|
||||
|
||||
if a.Used != b.Used {
|
||||
return obj.Bool2int(b.Used) - obj.Bool2int(a.Used)
|
||||
return a.Used
|
||||
}
|
||||
|
||||
ap := obj.Bool2int(haspointers(a.Type))
|
||||
bp := obj.Bool2int(haspointers(b.Type))
|
||||
ap := haspointers(a.Type)
|
||||
bp := haspointers(b.Type)
|
||||
if ap != bp {
|
||||
return bp - ap
|
||||
return ap
|
||||
}
|
||||
|
||||
ap = obj.Bool2int(a.Name.Needzero)
|
||||
bp = obj.Bool2int(b.Name.Needzero)
|
||||
ap = a.Name.Needzero
|
||||
bp = b.Name.Needzero
|
||||
if ap != bp {
|
||||
return bp - ap
|
||||
return ap
|
||||
}
|
||||
|
||||
if a.Type.Width < b.Type.Width {
|
||||
return +1
|
||||
return false
|
||||
}
|
||||
if a.Type.Width > b.Type.Width {
|
||||
return -1
|
||||
return true
|
||||
}
|
||||
|
||||
return stringsCompare(a.Sym.Name, b.Sym.Name)
|
||||
return a.Sym.Name < b.Sym.Name
|
||||
}
|
||||
|
||||
// stkdelta records the stack offset delta for a node
|
||||
|
|
@ -240,7 +242,7 @@ func allocauto(ptxt *obj.Prog) {
|
|||
|
||||
markautoused(ptxt)
|
||||
|
||||
listsort(&Curfn.Func.Dcl, cmpstackvar)
|
||||
listsort(&Curfn.Func.Dcl, cmpstackvarlt)
|
||||
|
||||
// Unused autos are at the end, chop 'em off.
|
||||
ll := Curfn.Func.Dcl
|
||||
|
|
|
src/cmd/compile/internal/gc/pgen_test.go (new file, 176 lines)
@ -0,0 +1,176 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gc
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Test all code paths for cmpstackvarlt.
|
||||
func TestCmpstackvar(t *testing.T) {
|
||||
testdata := []struct {
|
||||
a, b Node
|
||||
lt bool
|
||||
}{
|
||||
{
|
||||
Node{Class: PAUTO},
|
||||
Node{Class: PFUNC},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Node{Class: PFUNC},
|
||||
Node{Class: PAUTO},
|
||||
true,
|
||||
},
|
||||
{
|
||||
Node{Class: PFUNC, Xoffset: 0},
|
||||
Node{Class: PFUNC, Xoffset: 10},
|
||||
true,
|
||||
},
|
||||
{
|
||||
Node{Class: PFUNC, Xoffset: 20},
|
||||
Node{Class: PFUNC, Xoffset: 10},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Node{Class: PFUNC, Xoffset: 10},
|
||||
Node{Class: PFUNC, Xoffset: 10},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Node{Class: PAUTO, Used: true},
|
||||
Node{Class: PAUTO, Used: false},
|
||||
true,
|
||||
},
|
||||
{
|
||||
Node{Class: PAUTO, Used: false},
|
||||
Node{Class: PAUTO, Used: true},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Node{Class: PAUTO, Type: &Type{Haspointers: 1}}, // haspointers -> false
|
||||
Node{Class: PAUTO, Type: &Type{Haspointers: 2}}, // haspointers -> true
|
||||
false,
|
||||
},
|
||||
{
|
||||
Node{Class: PAUTO, Type: &Type{Haspointers: 2}}, // haspointers -> true
|
||||
Node{Class: PAUTO, Type: &Type{Haspointers: 1}}, // haspointers -> false
|
||||
true,
|
||||
},
|
||||
{
|
||||
Node{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}},
|
||||
Node{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: false}},
|
||||
true,
|
||||
},
|
||||
{
|
||||
Node{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: false}},
|
||||
Node{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Node{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}},
|
||||
Node{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Node{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}},
|
||||
Node{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}},
|
||||
true,
|
||||
},
|
||||
{
|
||||
Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
|
||||
Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
|
||||
true,
|
||||
},
|
||||
{
|
||||
Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
|
||||
Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
|
||||
Node{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
|
||||
false,
|
||||
},
|
||||
}
|
||||
for _, d := range testdata {
|
||||
got := cmpstackvarlt(&d.a, &d.b)
|
||||
if got != d.lt {
|
||||
t.Errorf("want %#v < %#v", d.a, d.b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func slice2nodelist(s []*Node) *NodeList {
|
||||
var nl *NodeList
|
||||
for _, n := range s {
|
||||
nl = list(nl, n)
|
||||
}
|
||||
return nl
|
||||
}
|
||||
|
||||
func nodelist2slice(nl *NodeList) []*Node {
|
||||
var s []*Node
|
||||
for l := nl; l != nil; l = l.Next {
|
||||
s = append(s, l.N)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func TestListsort(t *testing.T) {
|
||||
inp := []*Node{
|
||||
{Class: PFUNC, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PFUNC, Xoffset: 0, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PFUNC, Xoffset: 10, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PFUNC, Xoffset: 20, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Used: true, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Type: &Type{Haspointers: 1}, Name: &Name{}, Sym: &Sym{}}, // haspointers -> false
|
||||
{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
|
||||
{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
|
||||
}
|
||||
want := []*Node{
|
||||
{Class: PFUNC, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PFUNC, Xoffset: 0, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PFUNC, Xoffset: 10, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PFUNC, Xoffset: 20, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Used: true, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Type: &Type{Width: 2}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
|
||||
{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}},
|
||||
{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}},
|
||||
{Class: PAUTO, Type: &Type{Haspointers: 1}, Name: &Name{}, Sym: &Sym{}}, // haspointers -> false
|
||||
}
|
||||
// haspointers updates Type.Haspointers as a side effect, so
|
||||
// exercise this function on all inputs so that reflect.DeepEqual
|
||||
// doesn't produce false positives.
|
||||
for i := range want {
|
||||
haspointers(want[i].Type)
|
||||
haspointers(inp[i].Type)
|
||||
}
|
||||
|
||||
nl := slice2nodelist(inp)
|
||||
listsort(&nl, cmpstackvarlt)
|
||||
got := nodelist2slice(nl)
|
||||
if !reflect.DeepEqual(want, got) {
|
||||
t.Error("listsort failed")
|
||||
for i := range got {
|
||||
g := got[i]
|
||||
w := want[i]
|
||||
eq := reflect.DeepEqual(w, g)
|
||||
if !eq {
|
||||
t.Log(i, w, g)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -92,14 +92,6 @@ type Liveness struct {
|
|||
livepointers []Bvec
|
||||
}
|
||||
|
||||
func xmalloc(size uint32) interface{} {
|
||||
result := (interface{})(make([]byte, size))
|
||||
if result == nil {
|
||||
Fatalf("malloc failed")
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Constructs a new basic block containing a single instruction.
|
||||
func newblock(prog *obj.Prog) *BasicBlock {
|
||||
if prog == nil {
|
||||
|
|
@ -115,13 +107,6 @@ func newblock(prog *obj.Prog) *BasicBlock {
|
|||
return result
|
||||
}
|
||||
|
||||
// Frees a basic block and all of its leaf data structures.
|
||||
func freeblock(bb *BasicBlock) {
|
||||
if bb == nil {
|
||||
Fatalf("freeblock: cannot free nil")
|
||||
}
|
||||
}
|
||||
|
||||
// Adds an edge between two basic blocks by making from a predecessor of to and
|
||||
// to a successor of from.
|
||||
func addedge(from *BasicBlock, to *BasicBlock) {
|
||||
|
|
|
|||
|
|
@ -523,20 +523,15 @@ type TempVar struct {
|
|||
merge *TempVar // merge var with this one
|
||||
start int64 // smallest Prog.pc in live range
|
||||
end int64 // largest Prog.pc in live range
|
||||
addr uint8 // address taken - no accurate end
|
||||
removed uint8 // removed from program
|
||||
addr bool // address taken - no accurate end
|
||||
removed bool // removed from program
|
||||
}
|
||||
|
||||
// startcmp sorts TempVars by start, then id, then symbol name.
|
||||
type startcmp []*TempVar
|
||||
|
||||
func (x startcmp) Len() int {
|
||||
return len(x)
|
||||
}
|
||||
|
||||
func (x startcmp) Swap(i, j int) {
|
||||
x[i], x[j] = x[j], x[i]
|
||||
}
|
||||
|
||||
func (x startcmp) Len() int { return len(x) }
|
||||
func (x startcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x startcmp) Less(i, j int) bool {
|
||||
a := x[i]
|
||||
b := x[j]
|
||||
|
|
@ -556,7 +551,7 @@ func (x startcmp) Less(i, j int) bool {
|
|||
return int(a.def.Id-b.def.Id) < 0
|
||||
}
|
||||
if a.node != b.node {
|
||||
return stringsCompare(a.node.Sym.Name, b.node.Sym.Name) < 0
|
||||
return a.node.Sym.Name < b.node.Sym.Name
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
@ -577,22 +572,11 @@ func mergetemp(firstp *obj.Prog) {
|
|||
}
|
||||
|
||||
// Build list of all mergeable variables.
|
||||
nvar := 0
|
||||
var vars []*TempVar
|
||||
for l := Curfn.Func.Dcl; l != nil; l = l.Next {
|
||||
if canmerge(l.N) {
|
||||
nvar++
|
||||
}
|
||||
}
|
||||
|
||||
var_ := make([]TempVar, nvar)
|
||||
nvar = 0
|
||||
var n *Node
|
||||
var v *TempVar
|
||||
for l := Curfn.Func.Dcl; l != nil; l = l.Next {
|
||||
n = l.N
|
||||
if canmerge(n) {
|
||||
v = &var_[nvar]
|
||||
nvar++
|
||||
if n := l.N; canmerge(n) {
|
||||
v := &TempVar{}
|
||||
vars = append(vars, v)
|
||||
n.SetOpt(v)
|
||||
v.node = n
|
||||
}
|
||||
|
|
@ -607,8 +591,8 @@ func mergetemp(firstp *obj.Prog) {
|
|||
if p.From.Node != nil && ((p.From.Node).(*Node)).Opt() != nil && p.To.Node != nil && ((p.To.Node).(*Node)).Opt() != nil {
|
||||
Fatalf("double node %v", p)
|
||||
}
|
||||
v = nil
|
||||
n, _ = p.From.Node.(*Node)
|
||||
var v *TempVar
|
||||
n, _ := p.From.Node.(*Node)
|
||||
if n != nil {
|
||||
v, _ = n.Opt().(*TempVar)
|
||||
}
|
||||
|
|
@ -625,7 +609,7 @@ func mergetemp(firstp *obj.Prog) {
|
|||
f.Data = v.use
|
||||
v.use = f
|
||||
if n == p.From.Node && (p.Info.Flags&LeftAddr != 0) {
|
||||
v.addr = 1
|
||||
v.addr = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -637,9 +621,8 @@ func mergetemp(firstp *obj.Prog) {
|
|||
nkill := 0
|
||||
|
||||
// Special case.
|
||||
for i := 0; i < len(var_); i++ {
|
||||
v = &var_[i]
|
||||
if v.addr != 0 {
|
||||
for _, v := range vars {
|
||||
if v.addr {
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
@ -650,7 +633,7 @@ func mergetemp(firstp *obj.Prog) {
|
|||
if p.To.Node == v.node && (p.Info.Flags&RightWrite != 0) && p.Info.Flags&RightRead == 0 {
|
||||
p.As = obj.ANOP
|
||||
p.To = obj.Addr{}
|
||||
v.removed = 1
|
||||
v.removed = true
|
||||
if debugmerge > 0 && Debug['v'] != 0 {
|
||||
fmt.Printf("drop write-only %v\n", v.node.Sym)
|
||||
}
|
||||
|
|
@ -673,7 +656,7 @@ func mergetemp(firstp *obj.Prog) {
|
|||
if p.From.Node == v.node && p1.To.Node == v.node && (p.Info.Flags&Move != 0) && (p.Info.Flags|p1.Info.Flags)&(LeftAddr|RightAddr) == 0 && p.Info.Flags&SizeAny == p1.Info.Flags&SizeAny {
|
||||
p1.From = p.From
|
||||
Thearch.Excise(f)
|
||||
v.removed = 1
|
||||
v.removed = true
|
||||
if debugmerge > 0 && Debug['v'] != 0 {
|
||||
fmt.Printf("drop immediate-use %v\n", v.node.Sym)
|
||||
}
|
||||
|
|
@ -687,29 +670,25 @@ func mergetemp(firstp *obj.Prog) {
|
|||
// Traverse live range of each variable to set start, end.
|
||||
// Each flood uses a new value of gen so that we don't have
|
||||
// to clear all the r->active words after each variable.
|
||||
gen := int32(0)
|
||||
gen := uint32(0)
|
||||
|
||||
for i := 0; i < len(var_); i++ {
|
||||
v = &var_[i]
|
||||
for _, v := range vars {
|
||||
gen++
|
||||
for f := v.use; f != nil; f = f.Data.(*Flow) {
|
||||
mergewalk(v, f, uint32(gen))
|
||||
mergewalk(v, f, gen)
|
||||
}
|
||||
if v.addr != 0 {
|
||||
if v.addr {
|
||||
gen++
|
||||
for f := v.use; f != nil; f = f.Data.(*Flow) {
|
||||
varkillwalk(v, f, uint32(gen))
|
||||
varkillwalk(v, f, gen)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sort variables by start.
|
||||
bystart := make([]*TempVar, len(var_))
|
||||
|
||||
for i := 0; i < len(var_); i++ {
|
||||
bystart[i] = &var_[i]
|
||||
}
|
||||
sort.Sort(startcmp(bystart[:len(var_)]))
|
||||
bystart := make([]*TempVar, len(vars))
|
||||
copy(bystart, vars)
|
||||
sort.Sort(startcmp(bystart))
|
||||
|
||||
// List of in-use variables, sorted by end, so that the ones that
|
||||
// will last the longest are the earliest ones in the array.
|
||||
|
|
@ -717,40 +696,35 @@ func mergetemp(firstp *obj.Prog) {
|
|||
// In theory we should use a sorted tree so that insertions are
|
||||
// guaranteed O(log n) and then the loop is guaranteed O(n log n).
|
||||
// In practice, it doesn't really matter.
|
||||
inuse := make([]*TempVar, len(var_))
|
||||
inuse := make([]*TempVar, len(bystart))
|
||||
|
||||
ninuse := 0
|
||||
nfree := len(var_)
|
||||
var t *Type
|
||||
var v1 *TempVar
|
||||
var j int
|
||||
for i := 0; i < len(var_); i++ {
|
||||
v = bystart[i]
|
||||
nfree := len(bystart)
|
||||
for _, v := range bystart {
|
||||
if debugmerge > 0 && Debug['v'] != 0 {
|
||||
fmt.Printf("consider %v: removed=%d\n", Nconv(v.node, obj.FmtSharp), v.removed)
|
||||
fmt.Printf("consider %v: removed=%t\n", Nconv(v.node, obj.FmtSharp), v.removed)
|
||||
}
|
||||
|
||||
if v.removed != 0 {
|
||||
if v.removed {
|
||||
continue
|
||||
}
|
||||
|
||||
// Expire no longer in use.
|
||||
for ninuse > 0 && inuse[ninuse-1].end < v.start {
|
||||
ninuse--
|
||||
v1 = inuse[ninuse]
|
||||
nfree--
|
||||
inuse[nfree] = v1
|
||||
inuse[nfree] = inuse[ninuse]
|
||||
}
|
||||
|
||||
if debugmerge > 0 && Debug['v'] != 0 {
|
||||
fmt.Printf("consider %v: removed=%d nfree=%d nvar=%d\n", Nconv(v.node, obj.FmtSharp), v.removed, nfree, len(var_))
|
||||
fmt.Printf("consider %v: removed=%t nfree=%d nvar=%d\n", Nconv(v.node, obj.FmtSharp), v.removed, nfree, len(bystart))
|
||||
}
|
||||
|
||||
// Find old temp to reuse if possible.
|
||||
t = v.node.Type
|
||||
t := v.node.Type
|
||||
|
||||
for j = nfree; j < len(var_); j++ {
|
||||
v1 = inuse[j]
|
||||
for j := nfree; j < len(inuse); j++ {
|
||||
v1 := inuse[j]
|
||||
if debugmerge > 0 && Debug['v'] != 0 {
|
||||
fmt.Printf("consider %v: maybe %v: type=%v,%v addrtaken=%v,%v\n", Nconv(v.node, obj.FmtSharp), Nconv(v1.node, obj.FmtSharp), t, v1.node.Type, v.node.Addrtaken, v1.node.Addrtaken)
|
||||
}
|
||||
|
|
@ -774,7 +748,7 @@ func mergetemp(firstp *obj.Prog) {
|
|||
}
|
||||
|
||||
// Sort v into inuse.
|
||||
j = ninuse
|
||||
j := ninuse
|
||||
ninuse++
|
||||
|
||||
for j > 0 && inuse[j-1].end < v.end {
|
||||
|
|
@ -786,16 +760,14 @@ func mergetemp(firstp *obj.Prog) {
|
|||
}
|
||||
|
||||
if debugmerge > 0 && Debug['v'] != 0 {
|
||||
fmt.Printf("%v [%d - %d]\n", Curfn.Func.Nname.Sym, len(var_), nkill)
|
||||
var v *TempVar
|
||||
for i := 0; i < len(var_); i++ {
|
||||
v = &var_[i]
|
||||
fmt.Printf("%v [%d - %d]\n", Curfn.Func.Nname.Sym, len(vars), nkill)
|
||||
for _, v := range vars {
|
||||
fmt.Printf("var %v %v %d-%d", Nconv(v.node, obj.FmtSharp), v.node.Type, v.start, v.end)
|
||||
if v.addr != 0 {
|
||||
fmt.Printf(" addr=1")
|
||||
if v.addr {
|
||||
fmt.Printf(" addr=true")
|
||||
}
|
||||
if v.removed != 0 {
|
||||
fmt.Printf(" dead=1")
|
||||
if v.removed {
|
||||
fmt.Printf(" removed=true")
|
||||
}
|
||||
if v.merge != nil {
|
||||
fmt.Printf(" merge %v", Nconv(v.merge.node, obj.FmtSharp))
|
||||
|
|
@ -814,16 +786,16 @@ func mergetemp(firstp *obj.Prog) {
|
|||
// Update node references to use merged temporaries.
|
||||
for f := g.Start; f != nil; f = f.Link {
|
||||
p := f.Prog
|
||||
n, _ = p.From.Node.(*Node)
|
||||
n, _ := p.From.Node.(*Node)
|
||||
if n != nil {
|
||||
v, _ = n.Opt().(*TempVar)
|
||||
v, _ := n.Opt().(*TempVar)
|
||||
if v != nil && v.merge != nil {
|
||||
p.From.Node = v.merge.node
|
||||
}
|
||||
}
|
||||
n, _ = p.To.Node.(*Node)
|
||||
if n != nil {
|
||||
v, _ = n.Opt().(*TempVar)
|
||||
v, _ := n.Opt().(*TempVar)
|
||||
if v != nil && v.merge != nil {
|
||||
p.To.Node = v.merge.node
|
||||
}
|
||||
|
|
@ -831,17 +803,16 @@ func mergetemp(firstp *obj.Prog) {
|
|||
}
|
||||
|
||||
// Delete merged nodes from declaration list.
|
||||
var l *NodeList
|
||||
for lp := &Curfn.Func.Dcl; ; {
|
||||
l = *lp
|
||||
l := *lp
|
||||
if l == nil {
|
||||
break
|
||||
}
|
||||
|
||||
Curfn.Func.Dcl.End = l
|
||||
n = l.N
|
||||
v, _ = n.Opt().(*TempVar)
|
||||
if v != nil && (v.merge != nil || v.removed != 0) {
|
||||
n := l.N
|
||||
v, _ := n.Opt().(*TempVar)
|
||||
if v != nil && (v.merge != nil || v.removed) {
|
||||
*lp = l.Next
|
||||
continue
|
||||
}
|
||||
|
|
@ -850,8 +821,8 @@ func mergetemp(firstp *obj.Prog) {
|
|||
}
|
||||
|
||||
// Clear aux structures.
|
||||
for i := 0; i < len(var_); i++ {
|
||||
var_[i].node.SetOpt(nil)
|
||||
for _, v := range vars {
|
||||
v.node.SetOpt(nil)
|
||||
}
|
||||
|
||||
Flowend(g)
|
||||
|
|
|
|||
|
|
@ -32,8 +32,8 @@ var noinst_pkgs = []string{"sync", "sync/atomic"}
|
|||
|
||||
func ispkgin(pkgs []string) bool {
|
||||
if myimportpath != "" {
|
||||
for i := 0; i < len(pkgs); i++ {
|
||||
if myimportpath == pkgs[i] {
|
||||
for _, p := range pkgs {
|
||||
if myimportpath == p {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -167,88 +167,11 @@ func walkrange(n *Node) {
|
|||
default:
|
||||
Fatalf("walkrange")
|
||||
|
||||
// Lower n into runtime·memclr if possible, for
|
||||
// fast zeroing of slices and arrays (issue 5373).
|
||||
// Look for instances of
|
||||
//
|
||||
// for i := range a {
|
||||
// a[i] = zero
|
||||
// }
|
||||
//
|
||||
// in which the evaluation of a is side-effect-free.
|
||||
case TARRAY:
|
||||
if Debug['N'] == 0 {
|
||||
if flag_race == 0 {
|
||||
if v1 != nil {
|
||||
if v2 == nil {
|
||||
if n.Nbody != nil {
|
||||
if n.Nbody.N != nil { // at least one statement in body
|
||||
if n.Nbody.Next == nil { // at most one statement in body
|
||||
tmp := n.Nbody.N // first statement of body
|
||||
if tmp.Op == OAS {
|
||||
if tmp.Left.Op == OINDEX {
|
||||
if samesafeexpr(tmp.Left.Left, a) {
|
||||
if samesafeexpr(tmp.Left.Right, v1) {
|
||||
if t.Type.Width > 0 {
|
||||
if iszero(tmp.Right) {
|
||||
// Convert to
|
||||
// if len(a) != 0 {
|
||||
// hp = &a[0]
|
||||
// hn = len(a)*sizeof(elem(a))
|
||||
// memclr(hp, hn)
|
||||
// i = len(a) - 1
|
||||
// }
|
||||
n.Op = OIF
|
||||
|
||||
n.Nbody = nil
|
||||
n.Left = Nod(ONE, Nod(OLEN, a, nil), Nodintconst(0))
|
||||
|
||||
// hp = &a[0]
|
||||
hp := temp(Ptrto(Types[TUINT8]))
|
||||
|
||||
tmp := Nod(OINDEX, a, Nodintconst(0))
|
||||
tmp.Bounded = true
|
||||
tmp = Nod(OADDR, tmp, nil)
|
||||
tmp = Nod(OCONVNOP, tmp, nil)
|
||||
tmp.Type = Ptrto(Types[TUINT8])
|
||||
n.Nbody = list(n.Nbody, Nod(OAS, hp, tmp))
|
||||
|
||||
// hn = len(a) * sizeof(elem(a))
|
||||
hn := temp(Types[TUINTPTR])
|
||||
|
||||
tmp = Nod(OLEN, a, nil)
|
||||
tmp = Nod(OMUL, tmp, Nodintconst(t.Type.Width))
|
||||
tmp = conv(tmp, Types[TUINTPTR])
|
||||
n.Nbody = list(n.Nbody, Nod(OAS, hn, tmp))
|
||||
|
||||
// memclr(hp, hn)
|
||||
fn := mkcall("memclr", nil, nil, hp, hn)
|
||||
|
||||
n.Nbody = list(n.Nbody, fn)
|
||||
|
||||
// i = len(a) - 1
|
||||
v1 = Nod(OAS, v1, Nod(OSUB, Nod(OLEN, a, nil), Nodintconst(1)))
|
||||
|
||||
n.Nbody = list(n.Nbody, v1)
|
||||
|
||||
typecheck(&n.Left, Erv)
|
||||
typechecklist(n.Nbody, Etop)
|
||||
walkstmt(&n)
|
||||
if memclrrange(n, v1, v2, a) {
|
||||
lineno = int32(lno)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// orderstmt arranged for a copy of the array/slice variable if needed.
|
||||
ha := a
|
||||
|
|
@ -404,3 +327,82 @@ func walkrange(n *Node) {
|
|||
|
||||
lineno = int32(lno)
|
||||
}
|
||||
|
||||
// Lower n into runtime·memclr if possible, for
|
||||
// fast zeroing of slices and arrays (issue 5373).
|
||||
// Look for instances of
|
||||
//
|
||||
// for i := range a {
|
||||
// a[i] = zero
|
||||
// }
|
||||
//
|
||||
// in which the evaluation of a is side-effect-free.
|
||||
//
|
||||
// Parameters are as in walkrange: "for v1, v2 = range a".
|
||||
func memclrrange(n, v1, v2, a *Node) bool {
|
||||
if Debug['N'] != 0 || flag_race != 0 {
|
||||
return false
|
||||
}
|
||||
if v1 == nil || v2 != nil {
|
||||
return false
|
||||
}
|
||||
if n.Nbody == nil || n.Nbody.N == nil || n.Nbody.Next != nil {
|
||||
return false
|
||||
}
|
||||
stmt := n.Nbody.N // only stmt in body
|
||||
if stmt.Op != OAS || stmt.Left.Op != OINDEX {
|
||||
return false
|
||||
}
|
||||
if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) {
|
||||
return false
|
||||
}
|
||||
elemsize := n.Type.Type.Width
|
||||
if elemsize <= 0 || !iszero(stmt.Right) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Convert to
|
||||
// if len(a) != 0 {
|
||||
// hp = &a[0]
|
||||
// hn = len(a)*sizeof(elem(a))
|
||||
// memclr(hp, hn)
|
||||
// i = len(a) - 1
|
||||
// }
|
||||
n.Op = OIF
|
||||
|
||||
n.Nbody = nil
|
||||
n.Left = Nod(ONE, Nod(OLEN, a, nil), Nodintconst(0))
|
||||
|
||||
// hp = &a[0]
|
||||
hp := temp(Ptrto(Types[TUINT8]))
|
||||
|
||||
tmp := Nod(OINDEX, a, Nodintconst(0))
|
||||
tmp.Bounded = true
|
||||
tmp = Nod(OADDR, tmp, nil)
|
||||
tmp = Nod(OCONVNOP, tmp, nil)
|
||||
tmp.Type = Ptrto(Types[TUINT8])
|
||||
n.Nbody = list(n.Nbody, Nod(OAS, hp, tmp))
|
||||
|
||||
// hn = len(a) * sizeof(elem(a))
|
||||
hn := temp(Types[TUINTPTR])
|
||||
|
||||
tmp = Nod(OLEN, a, nil)
|
||||
tmp = Nod(OMUL, tmp, Nodintconst(elemsize))
|
||||
tmp = conv(tmp, Types[TUINTPTR])
|
||||
n.Nbody = list(n.Nbody, Nod(OAS, hn, tmp))
|
||||
|
||||
// memclr(hp, hn)
|
||||
fn := mkcall("memclr", nil, nil, hp, hn)
|
||||
|
||||
n.Nbody = list(n.Nbody, fn)
|
||||
|
||||
// i = len(a) - 1
|
||||
v1 = Nod(OAS, v1, Nod(OSUB, Nod(OLEN, a, nil), Nodintconst(1)))
|
||||
|
||||
n.Nbody = list(n.Nbody, v1)
|
||||
|
||||
typecheck(&n.Left, Erv)
|
||||
typechecklist(n.Nbody, Etop)
|
||||
walkstmt(&n)
|
||||
return true
|
||||
}
|
||||
|
|
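For context, the user-level loop shape that this lowering targets (a standalone illustration; whether the rewrite actually fires also depends on the other conditions checked in memclrrange above):

package main

import "fmt"

func main() {
	a := []int{1, 2, 3, 4}
	// Zeroing an indexable through its own range index, with exactly this
	// one-statement body, is the pattern recognized for the memclr rewrite.
	for i := range a {
		a[i] = 0
	}
	fmt.Println(a) // [0 0 0 0]
}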
|
|||
|
|
@ -9,6 +9,7 @@ import (
|
|||
"cmd/internal/obj"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
)
|
||||
|
||||
/*
|
||||
|
|
@ -16,93 +17,30 @@ import (
|
|||
*/
|
||||
var signatlist *NodeList
|
||||
|
||||
func sigcmp(a *Sig, b *Sig) int {
|
||||
i := stringsCompare(a.name, b.name)
|
||||
if i != 0 {
|
||||
return i
|
||||
}
|
||||
if a.pkg == b.pkg {
|
||||
return 0
|
||||
}
|
||||
if a.pkg == nil {
|
||||
return -1
|
||||
}
|
||||
if b.pkg == nil {
|
||||
return +1
|
||||
}
|
||||
return stringsCompare(a.pkg.Path, b.pkg.Path)
|
||||
// byMethodNameAndPackagePath sorts method signatures by name, then package path.
|
||||
type byMethodNameAndPackagePath []*Sig
|
||||
|
||||
func (x byMethodNameAndPackagePath) Len() int { return len(x) }
|
||||
func (x byMethodNameAndPackagePath) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x byMethodNameAndPackagePath) Less(i, j int) bool {
|
||||
return siglt(x[i], x[j])
|
||||
}
|
||||
|
||||
func lsort(l *Sig, f func(*Sig, *Sig) int) *Sig {
|
||||
if l == nil || l.link == nil {
|
||||
return l
|
||||
// siglt reports whether a < b
|
||||
func siglt(a, b *Sig) bool {
|
||||
if a.name != b.name {
|
||||
return a.name < b.name
|
||||
}
|
||||
|
||||
l1 := l
|
||||
l2 := l
|
||||
for {
|
||||
l2 = l2.link
|
||||
if l2 == nil {
|
||||
break
|
||||
if a.pkg == b.pkg {
|
||||
return false
|
||||
}
|
||||
l2 = l2.link
|
||||
if l2 == nil {
|
||||
break
|
||||
if a.pkg == nil {
|
||||
return true
|
||||
}
|
||||
l1 = l1.link
|
||||
if b.pkg == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
l2 = l1.link
|
||||
l1.link = nil
|
||||
l1 = lsort(l, f)
|
||||
l2 = lsort(l2, f)
|
||||
|
||||
/* set up lead element */
|
||||
if f(l1, l2) < 0 {
|
||||
l = l1
|
||||
l1 = l1.link
|
||||
} else {
|
||||
l = l2
|
||||
l2 = l2.link
|
||||
}
|
||||
|
||||
le := l
|
||||
|
||||
for {
|
||||
if l1 == nil {
|
||||
for l2 != nil {
|
||||
le.link = l2
|
||||
le = l2
|
||||
l2 = l2.link
|
||||
}
|
||||
|
||||
le.link = nil
|
||||
break
|
||||
}
|
||||
|
||||
if l2 == nil {
|
||||
for l1 != nil {
|
||||
le.link = l1
|
||||
le = l1
|
||||
l1 = l1.link
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
if f(l1, l2) < 0 {
|
||||
le.link = l1
|
||||
le = l1
|
||||
l1 = l1.link
|
||||
} else {
|
||||
le.link = l2
|
||||
le = l2
|
||||
l2 = l2.link
|
||||
}
|
||||
}
|
||||
|
||||
le.link = nil
|
||||
return l
|
||||
return a.pkg.Path < b.pkg.Path
|
||||
}
|
||||
|
||||
// Builds a type representing a Bucket structure for
|
||||
|
|
@ -335,11 +273,9 @@ func methodfunc(f *Type, receiver *Type) *Type {
|
|||
return t
|
||||
}
|
||||
|
||||
/*
|
||||
* return methods of non-interface type t, sorted by name.
|
||||
* generates stub functions as needed.
|
||||
*/
|
||||
func methods(t *Type) *Sig {
|
||||
// methods returns the methods of the non-interface type t, sorted by name.
|
||||
// Generates stub functions as needed.
|
||||
func methods(t *Type) []*Sig {
|
||||
// method type
|
||||
mt := methtype(t, 0)
|
||||
|
||||
|
|
@ -357,11 +293,7 @@ func methods(t *Type) *Sig {
|
|||
|
||||
// make list of methods for t,
|
||||
// generating code if necessary.
|
||||
var a *Sig
|
||||
|
||||
var this *Type
|
||||
var b *Sig
|
||||
var method *Sym
|
||||
var ms []*Sig
|
||||
for f := mt.Xmethod; f != nil; f = f.Down {
|
||||
if f.Etype != TFIELD {
|
||||
Fatalf("methods: not field %v", f)
|
||||
|
|
@ -376,7 +308,7 @@ func methods(t *Type) *Sig {
|
|||
continue
|
||||
}
|
||||
|
||||
method = f.Sym
|
||||
method := f.Sym
|
||||
if method == nil {
|
||||
continue
|
||||
}
|
||||
|
|
@ -385,7 +317,7 @@ func methods(t *Type) *Sig {
|
|||
// if pointer receiver but non-pointer t and
|
||||
// this is not an embedded pointer inside a struct,
|
||||
// method does not apply.
|
||||
this = getthisx(f.Type).Type.Type
|
||||
this := getthisx(f.Type).Type.Type
|
||||
|
||||
if Isptr[this.Etype] && this.Type == t {
|
||||
continue
|
||||
|
|
@ -394,55 +326,48 @@ func methods(t *Type) *Sig {
|
|||
continue
|
||||
}
|
||||
|
||||
b = new(Sig)
|
||||
b.link = a
|
||||
a = b
|
||||
var sig Sig
|
||||
ms = append(ms, &sig)
|
||||
|
||||
a.name = method.Name
|
||||
sig.name = method.Name
|
||||
if !exportname(method.Name) {
|
||||
if method.Pkg == nil {
|
||||
Fatalf("methods: missing package")
|
||||
}
|
||||
a.pkg = method.Pkg
|
||||
sig.pkg = method.Pkg
|
||||
}
|
||||
|
||||
a.isym = methodsym(method, it, 1)
|
||||
a.tsym = methodsym(method, t, 0)
|
||||
a.type_ = methodfunc(f.Type, t)
|
||||
a.mtype = methodfunc(f.Type, nil)
|
||||
sig.isym = methodsym(method, it, 1)
|
||||
sig.tsym = methodsym(method, t, 0)
|
||||
sig.type_ = methodfunc(f.Type, t)
|
||||
sig.mtype = methodfunc(f.Type, nil)
|
||||
|
||||
if a.isym.Flags&SymSiggen == 0 {
|
||||
a.isym.Flags |= SymSiggen
|
||||
if sig.isym.Flags&SymSiggen == 0 {
|
||||
sig.isym.Flags |= SymSiggen
|
||||
if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
|
||||
compiling_wrappers = 1
|
||||
genwrapper(it, f, a.isym, 1)
|
||||
genwrapper(it, f, sig.isym, 1)
|
||||
compiling_wrappers = 0
|
||||
}
|
||||
}
|
||||
|
||||
if a.tsym.Flags&SymSiggen == 0 {
|
||||
a.tsym.Flags |= SymSiggen
|
||||
if sig.tsym.Flags&SymSiggen == 0 {
|
||||
sig.tsym.Flags |= SymSiggen
|
||||
if !Eqtype(this, t) {
|
||||
compiling_wrappers = 1
|
||||
genwrapper(t, f, a.tsym, 0)
|
||||
genwrapper(t, f, sig.tsym, 0)
|
||||
compiling_wrappers = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return lsort(a, sigcmp)
|
||||
sort.Sort(byMethodNameAndPackagePath(ms))
|
||||
return ms
|
||||
}
|
||||
|
||||
/*
|
||||
* return methods of interface type t, sorted by name.
|
||||
*/
|
||||
func imethods(t *Type) *Sig {
|
||||
var a *Sig
|
||||
var method *Sym
|
||||
var isym *Sym
|
||||
|
||||
var all *Sig
|
||||
var last *Sig
|
||||
// imethods returns the methods of the interface type t, sorted by name.
|
||||
func imethods(t *Type) []*Sig {
|
||||
var methods []*Sig
|
||||
for f := t.Type; f != nil; f = f.Down {
|
||||
if f.Etype != TFIELD {
|
||||
Fatalf("imethods: not field")
|
||||
|
|
@ -450,29 +375,28 @@ func imethods(t *Type) *Sig {
|
|||
if f.Type.Etype != TFUNC || f.Sym == nil {
|
||||
continue
|
||||
}
|
||||
method = f.Sym
|
||||
a = new(Sig)
|
||||
a.name = method.Name
|
||||
method := f.Sym
|
||||
var sig = Sig{
|
||||
name: method.Name,
|
||||
}
|
||||
if !exportname(method.Name) {
|
||||
if method.Pkg == nil {
|
||||
Fatalf("imethods: missing package")
|
||||
}
|
||||
a.pkg = method.Pkg
|
||||
sig.pkg = method.Pkg
|
||||
}
|
||||
|
||||
a.mtype = f.Type
|
||||
a.offset = 0
|
||||
a.type_ = methodfunc(f.Type, nil)
|
||||
sig.mtype = f.Type
|
||||
sig.offset = 0
|
||||
sig.type_ = methodfunc(f.Type, nil)
|
||||
|
||||
if last != nil && sigcmp(last, a) >= 0 {
|
||||
Fatalf("sigcmp vs sortinter %s %s", last.name, a.name)
|
||||
if n := len(methods); n > 0 {
|
||||
last := methods[n-1]
|
||||
if !(siglt(last, &sig)) {
|
||||
Fatalf("sigcmp vs sortinter %s %s", last.name, sig.name)
|
||||
}
|
||||
if last == nil {
|
||||
all = a
|
||||
} else {
|
||||
last.link = a
|
||||
}
|
||||
last = a
|
||||
methods = append(methods, &sig)
|
||||
|
||||
// Compiler can only refer to wrappers for non-blank methods.
|
||||
if isblanksym(method) {
|
||||
|
|
@ -483,7 +407,7 @@ func imethods(t *Type) *Sig {
|
|||
// IfaceType.Method is not in the reflect data.
|
||||
// Generate the method body, so that compiled
|
||||
// code can refer to it.
|
||||
isym = methodsym(method, t, 0)
|
||||
isym := methodsym(method, t, 0)
|
||||
|
||||
if isym.Flags&SymSiggen == 0 {
|
||||
isym.Flags |= SymSiggen
|
||||
|
|
@ -491,7 +415,7 @@ func imethods(t *Type) *Sig {
|
|||
}
|
||||
}
|
||||
|
||||
return all
|
||||
return methods
|
||||
}
|
||||
|
||||
var dimportpath_gopkg *Pkg
|
||||
|
|
@ -559,7 +483,7 @@ func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
|
|||
*/
|
||||
func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
|
||||
m := methods(t)
|
||||
if t.Sym == nil && m == nil {
|
||||
if t.Sym == nil && len(m) == 0 {
|
||||
return off
|
||||
}
|
||||
|
||||
|
|
@ -568,10 +492,8 @@ func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
|
|||
|
||||
dsymptr(sym, ptroff, sym, off)
|
||||
|
||||
n := 0
|
||||
for a := m; a != nil; a = a.link {
|
||||
for _, a := range m {
|
||||
dtypesym(a.type_)
|
||||
n++
|
||||
}
|
||||
|
||||
ot := off
|
||||
|
|
@ -591,11 +513,12 @@ func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
|
|||
// slice header
|
||||
ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
|
||||
|
||||
n := len(m)
|
||||
ot = duintxx(s, ot, uint64(n), Widthint)
|
||||
ot = duintxx(s, ot, uint64(n), Widthint)
|
||||
|
||||
// methods
|
||||
for a := m; a != nil; a = a.link {
|
||||
for _, a := range m {
|
||||
// method
|
||||
// ../../runtime/type.go:/method
|
||||
ot = dgostringptr(s, ot, a.name)
|
||||
|
|
@ -943,10 +866,8 @@ func weaktypesym(t *Type) *Sym {
|
|||
return s
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns 1 if t has a reflexive equality operator.
|
||||
* That is, if x==x for all x of type t.
|
||||
*/
|
||||
// isreflexive reports whether t has a reflexive equality operator.
|
||||
// That is, if x==x for all x of type t.
|
||||
func isreflexive(t *Type) bool {
|
||||
switch t.Etype {
|
||||
case TBOOL,
|
||||
|
|
@ -987,7 +908,6 @@ func isreflexive(t *Type) bool {
|
|||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
|
||||
default:
|
||||
|
|
@ -996,6 +916,56 @@
}
}

// needkeyupdate reports whether map updates with t as a key
// need the key to be updated.
func needkeyupdate(t *Type) bool {
switch t.Etype {
case TBOOL,
TINT,
TUINT,
TINT8,
TUINT8,
TINT16,
TUINT16,
TINT32,
TUINT32,
TINT64,
TUINT64,
TUINTPTR,
TPTR32,
TPTR64,
TUNSAFEPTR,
TCHAN:
return false

case TFLOAT32, // floats can be +0/-0
TFLOAT64,
TCOMPLEX64,
TCOMPLEX128,
TINTER,
TSTRING: // strings might have smaller backing stores
return true

case TARRAY:
if Isslice(t) {
Fatalf("slice can't be a map key: %v", t)
}
return needkeyupdate(t.Type)

case TSTRUCT:
for t1 := t.Type; t1 != nil; t1 = t1.Down {
if needkeyupdate(t1.Type) {
return true
}
}
return false

default:
Fatalf("bad type for map key: %v", t)
return true
}
}

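needkeyupdate feeds a flag in the map type descriptor that tells the runtime whether an assignment to an existing key should also overwrite the stored key. A small illustration of why floats need this, assuming the current runtime behavior of overwriting the key on the needkeyupdate path (+0.0 and -0.0 compare equal but have different bit patterns):

package main

import (
	"fmt"
	"math"
)

func main() {
	m := map[float64]string{}
	m[+0.0] = "first"
	// -0.0 == +0.0, so this updates the existing entry. Because float keys
	// are flagged by needkeyupdate, the stored key is overwritten too, and
	// iteration then observes the negative-zero key.
	m[math.Copysign(0, -1)] = "second"
	for k, v := range m {
		fmt.Println(math.Signbit(k), v) // "true second" when the key is updated
	}
}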
func dtypesym(t *Type) *Sym {
// Replace byte, rune aliases with real type.
// They've been separate internally to make error messages
@ -1124,28 +1094,27 @@ ok:
|
|||
|
||||
case TINTER:
|
||||
m := imethods(t)
|
||||
n := 0
|
||||
for a := m; a != nil; a = a.link {
|
||||
n := len(m)
|
||||
for _, a := range m {
|
||||
dtypesym(a.type_)
|
||||
n++
|
||||
}
|
||||
|
||||
// ../../runtime/type.go:/InterfaceType
|
||||
// ../../../runtime/type.go:/InterfaceType
|
||||
ot = dcommontype(s, ot, t)
|
||||
|
||||
xt = ot - 2*Widthptr
|
||||
ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
|
||||
ot = duintxx(s, ot, uint64(n), Widthint)
|
||||
ot = duintxx(s, ot, uint64(n), Widthint)
|
||||
for a := m; a != nil; a = a.link {
|
||||
// ../../runtime/type.go:/imethod
|
||||
for _, a := range m {
|
||||
// ../../../runtime/type.go:/imethod
|
||||
ot = dgostringptr(s, ot, a.name)
|
||||
|
||||
ot = dgopkgpath(s, ot, a.pkg)
|
||||
ot = dsymptr(s, ot, dtypesym(a.type_), 0)
|
||||
}
|
||||
|
||||
// ../../runtime/type.go:/MapType
|
||||
// ../../../runtime/type.go:/MapType
|
||||
case TMAP:
|
||||
s1 := dtypesym(t.Down)
|
||||
|
||||
|
|
@ -1176,6 +1145,7 @@ ok:
|
|||
|
||||
ot = duint16(s, ot, uint16(mapbucket(t).Width))
|
||||
ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Down))))
|
||||
ot = duint8(s, ot, uint8(obj.Bool2int(needkeyupdate(t.Down))))
|
||||
|
||||
case TPTR32, TPTR64:
|
||||
if t.Type.Etype == TANY {
|
||||
|
|
@ -1269,8 +1239,7 @@ func dumptypestructs() {
|
|||
var n *Node
|
||||
|
||||
// copy types from externdcl list to signatlist
|
||||
for l := externdcl; l != nil; l = l.Next {
|
||||
n = l.N
|
||||
for _, n := range externdcl {
|
||||
if n.Op != OTYPE {
|
||||
continue
|
||||
}
|
||||
|

src/cmd/compile/internal/gc/reflect_test.go (new file, 47 lines)
@ -0,0 +1,47 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gc
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSortingByMethodNameAndPackagePath(t *testing.T) {
|
||||
data := []*Sig{
|
||||
&Sig{name: "b", pkg: &Pkg{Path: "abc"}},
|
||||
&Sig{name: "b", pkg: nil},
|
||||
&Sig{name: "c", pkg: nil},
|
||||
&Sig{name: "c", pkg: &Pkg{Path: "uvw"}},
|
||||
&Sig{name: "c", pkg: nil},
|
||||
&Sig{name: "b", pkg: &Pkg{Path: "xyz"}},
|
||||
&Sig{name: "a", pkg: &Pkg{Path: "abc"}},
|
||||
&Sig{name: "b", pkg: nil},
|
||||
}
|
||||
want := []*Sig{
|
||||
&Sig{name: "a", pkg: &Pkg{Path: "abc"}},
|
||||
&Sig{name: "b", pkg: nil},
|
||||
&Sig{name: "b", pkg: nil},
|
||||
&Sig{name: "b", pkg: &Pkg{Path: "abc"}},
|
||||
&Sig{name: "b", pkg: &Pkg{Path: "xyz"}},
|
||||
&Sig{name: "c", pkg: nil},
|
||||
&Sig{name: "c", pkg: nil},
|
||||
&Sig{name: "c", pkg: &Pkg{Path: "uvw"}},
|
||||
}
|
||||
if len(data) != len(want) {
|
||||
t.Fatal("want and data must match")
|
||||
}
|
||||
if reflect.DeepEqual(data, want) {
|
||||
t.Fatal("data must be shuffled")
|
||||
}
|
||||
sort.Sort(byMethodNameAndPackagePath(data))
|
||||
if !reflect.DeepEqual(data, want) {
|
||||
t.Logf("want: %#v", want)
|
||||
t.Logf("data: %#v", data)
|
||||
t.Errorf("sorting failed")
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -170,7 +170,7 @@ func setaddrs(bit Bits) {
|
|||
|
||||
for bany(&bit) {
|
||||
// convert each bit to a variable
|
||||
i = bnum(bit)
|
||||
i = bnum(&bit)
|
||||
|
||||
node = vars[i].node
|
||||
n = int(vars[i].name)
|
||||
|
|
@ -1321,7 +1321,7 @@ loop2:
|
|||
bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
|
||||
}
|
||||
for bany(&bit) {
|
||||
i = bnum(bit)
|
||||
i = bnum(&bit)
|
||||
change = 0
|
||||
paint1(f, i)
|
||||
biclr(&bit, uint(i))
|
||||
|
|
@ -1465,7 +1465,7 @@ func bany(a *Bits) bool {
|
|||
}
|
||||
|
||||
// bnum reports the lowest index of a 1 bit in a.
|
||||
func bnum(a Bits) int {
|
||||
func bnum(a *Bits) int {
|
||||
for i, x := range &a.b { // & to avoid making a copy of a.b
|
||||
if x != 0 {
|
||||
return 64*i + Bitno(x)
|
||||
|
|
@ -1541,7 +1541,7 @@ func (bits Bits) String() string {
|
|||
var buf bytes.Buffer
|
||||
sep := ""
|
||||
for bany(&bits) {
|
||||
i := bnum(bits)
|
||||
i := bnum(&bits)
|
||||
buf.WriteString(sep)
|
||||
sep = " "
|
||||
v := &vars[i]
|
||||
|
|
|
|||
|
|
@ -329,13 +329,13 @@ func staticcopy(l *Node, r *Node, out **NodeList) bool {
|
|||
// copy slice
|
||||
a := inittemps[r]
|
||||
|
||||
n1 := *l
|
||||
n1.Xoffset = l.Xoffset + int64(Array_array)
|
||||
gdata(&n1, Nod(OADDR, a, nil), Widthptr)
|
||||
n1.Xoffset = l.Xoffset + int64(Array_nel)
|
||||
gdata(&n1, r.Right, Widthint)
|
||||
n1.Xoffset = l.Xoffset + int64(Array_cap)
|
||||
gdata(&n1, r.Right, Widthint)
|
||||
n := *l
|
||||
n.Xoffset = l.Xoffset + int64(Array_array)
|
||||
gdata(&n, Nod(OADDR, a, nil), Widthptr)
|
||||
n.Xoffset = l.Xoffset + int64(Array_nel)
|
||||
gdata(&n, r.Right, Widthint)
|
||||
n.Xoffset = l.Xoffset + int64(Array_cap)
|
||||
gdata(&n, r.Right, Widthint)
|
||||
return true
|
||||
}
|
||||
fallthrough
|
||||
|
|
@ -344,24 +344,21 @@ func staticcopy(l *Node, r *Node, out **NodeList) bool {
|
|||
case OSTRUCTLIT:
|
||||
p := initplans[r]
|
||||
|
||||
n1 := *l
|
||||
var e *InitEntry
|
||||
var ll *Node
|
||||
var rr *Node
|
||||
for i := 0; i < len(p.E); i++ {
|
||||
e = &p.E[i]
|
||||
n1.Xoffset = l.Xoffset + e.Xoffset
|
||||
n1.Type = e.Expr.Type
|
||||
n := *l
|
||||
for i := range p.E {
|
||||
e := &p.E[i]
|
||||
n.Xoffset = l.Xoffset + e.Xoffset
|
||||
n.Type = e.Expr.Type
|
||||
if e.Expr.Op == OLITERAL {
|
||||
gdata(&n1, e.Expr, int(n1.Type.Width))
|
||||
gdata(&n, e.Expr, int(n.Type.Width))
|
||||
} else {
|
||||
ll = Nod(OXXX, nil, nil)
|
||||
*ll = n1
|
||||
ll := Nod(OXXX, nil, nil)
|
||||
*ll = n
|
||||
ll.Orig = ll // completely separate copy
|
||||
if !staticassign(ll, e.Expr, out) {
|
||||
// Requires computation, but we're
|
||||
// copying someone else's computation.
|
||||
rr = Nod(OXXX, nil, nil)
|
||||
rr := Nod(OXXX, nil, nil)
|
||||
|
||||
*rr = *orig
|
||||
rr.Orig = rr // completely separate copy
|
||||
|
|
@ -380,8 +377,6 @@ func staticcopy(l *Node, r *Node, out **NodeList) bool {
|
|||
}
|
||||
|
||||
func staticassign(l *Node, r *Node, out **NodeList) bool {
|
||||
var n1 Node
|
||||
|
||||
for r.Op == OCONVNOP {
|
||||
r = r.Left
|
||||
}
|
||||
|
|
@ -404,9 +399,9 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
|
|||
case OADDR:
|
||||
var nam Node
|
||||
if stataddr(&nam, r.Left) {
|
||||
n1 := *r
|
||||
n1.Left = &nam
|
||||
gdata(l, &n1, int(l.Type.Width))
|
||||
n := *r
|
||||
n.Left = &nam
|
||||
gdata(l, &n, int(l.Type.Width))
|
||||
return true
|
||||
}
|
||||
fallthrough
|
||||
|
|
@ -448,13 +443,13 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
|
|||
ta.Bound = Mpgetfix(r.Right.Val().U.(*Mpint))
|
||||
a := staticname(ta, 1)
|
||||
inittemps[r] = a
|
||||
n1 = *l
|
||||
n1.Xoffset = l.Xoffset + int64(Array_array)
|
||||
gdata(&n1, Nod(OADDR, a, nil), Widthptr)
|
||||
n1.Xoffset = l.Xoffset + int64(Array_nel)
|
||||
gdata(&n1, r.Right, Widthint)
|
||||
n1.Xoffset = l.Xoffset + int64(Array_cap)
|
||||
gdata(&n1, r.Right, Widthint)
|
||||
n := *l
|
||||
n.Xoffset = l.Xoffset + int64(Array_array)
|
||||
gdata(&n, Nod(OADDR, a, nil), Widthptr)
|
||||
n.Xoffset = l.Xoffset + int64(Array_nel)
|
||||
gdata(&n, r.Right, Widthint)
|
||||
n.Xoffset = l.Xoffset + int64(Array_cap)
|
||||
gdata(&n, r.Right, Widthint)
|
||||
|
||||
// Fall through to init underlying array.
|
||||
l = a
|
||||
|
|
@ -466,19 +461,17 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
|
|||
initplan(r)
|
||||
|
||||
p := initplans[r]
|
||||
n1 = *l
|
||||
var e *InitEntry
|
||||
var a *Node
|
||||
for i := 0; i < len(p.E); i++ {
|
||||
e = &p.E[i]
|
||||
n1.Xoffset = l.Xoffset + e.Xoffset
|
||||
n1.Type = e.Expr.Type
|
||||
n := *l
|
||||
for i := range p.E {
|
||||
e := &p.E[i]
|
||||
n.Xoffset = l.Xoffset + e.Xoffset
|
||||
n.Type = e.Expr.Type
|
||||
if e.Expr.Op == OLITERAL {
|
||||
gdata(&n1, e.Expr, int(n1.Type.Width))
|
||||
gdata(&n, e.Expr, int(n.Type.Width))
|
||||
} else {
|
||||
setlineno(e.Expr)
|
||||
a = Nod(OXXX, nil, nil)
|
||||
*a = n1
|
||||
a := Nod(OXXX, nil, nil)
|
||||
*a = n
|
||||
a.Orig = a // completely separate copy
|
||||
if !staticassign(a, e.Expr, out) {
|
||||
*out = list(*out, Nod(OAS, a, e.Expr))
|
||||
|
|
@ -569,9 +562,8 @@ func getdyn(n *Node, top int) int {
|
|||
break
|
||||
}
|
||||
|
||||
var value *Node
|
||||
for nl := n.List; nl != nil; nl = nl.Next {
|
||||
value = nl.N.Right
|
||||
value := nl.N.Right
|
||||
mode |= getdyn(value, 0)
|
||||
if mode == MODEDYNAM|MODECONST {
|
||||
break
|
||||
|
|
@ -582,18 +574,15 @@ func getdyn(n *Node, top int) int {
|
|||
}
|
||||
|
||||
func structlit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
|
||||
var r *Node
|
||||
var a *Node
|
||||
var index *Node
|
||||
var value *Node
|
||||
|
||||
for nl := n.List; nl != nil; nl = nl.Next {
|
||||
r = nl.N
|
||||
r := nl.N
|
||||
if r.Op != OKEY {
|
||||
Fatalf("structlit: rhs not OKEY: %v", r)
|
||||
}
|
||||
index = r.Left
|
||||
value = r.Right
|
||||
index := r.Left
|
||||
value := r.Right
|
||||
|
||||
var a *Node
|
||||
|
||||
switch value.Op {
|
||||
case OARRAYLIT:
|
||||
|
|
@ -650,18 +639,15 @@ func structlit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
|
|||
}
|
||||
|
||||
func arraylit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
|
||||
var r *Node
|
||||
var a *Node
|
||||
var index *Node
|
||||
var value *Node
|
||||
|
||||
for l := n.List; l != nil; l = l.Next {
|
||||
r = l.N
|
||||
r := l.N
|
||||
if r.Op != OKEY {
|
||||
Fatalf("arraylit: rhs not OKEY: %v", r)
|
||||
}
|
||||
index = r.Left
|
||||
value = r.Right
|
||||
index := r.Left
|
||||
value := r.Right
|
||||
|
||||
var a *Node
|
||||
|
||||
switch value.Op {
|
||||
case OARRAYLIT:
|
||||
|
|
@ -828,17 +814,14 @@ func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
|||
*init = list(*init, a)
|
||||
|
||||
// put dynamics into slice (6)
|
||||
var value *Node
|
||||
var r *Node
|
||||
var index *Node
|
||||
for l := n.List; l != nil; l = l.Next {
|
||||
r = l.N
|
||||
r := l.N
|
||||
if r.Op != OKEY {
|
||||
Fatalf("slicelit: rhs not OKEY: %v", r)
|
||||
}
|
||||
index = r.Left
|
||||
value = r.Right
|
||||
a = Nod(OINDEX, var_, index)
|
||||
index := r.Left
|
||||
value := r.Right
|
||||
a := Nod(OINDEX, var_, index)
|
||||
a.Bounded = true
|
||||
|
||||
// TODO need to check bounds?
|
||||
|
|
@ -872,10 +855,6 @@ func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
|||
}
|
||||
|
||||
func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
||||
var r *Node
|
||||
var index *Node
|
||||
var value *Node
|
||||
|
||||
ctxt = 0
|
||||
|
||||
// make the map var
|
||||
|
|
@ -889,13 +868,12 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
|||
b := int64(0)
|
||||
|
||||
for l := n.List; l != nil; l = l.Next {
|
||||
r = l.N
|
||||
|
||||
r := l.N
|
||||
if r.Op != OKEY {
|
||||
Fatalf("maplit: rhs not OKEY: %v", r)
|
||||
}
|
||||
index = r.Left
|
||||
value = r.Right
|
||||
index := r.Left
|
||||
value := r.Right
|
||||
|
||||
if isliteral(index) && isliteral(value) {
|
||||
b++
|
||||
|
|
@ -936,17 +914,14 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
|||
vstat := staticname(t, ctxt)
|
||||
|
||||
b := int64(0)
|
||||
var index *Node
|
||||
var r *Node
|
||||
var value *Node
|
||||
for l := n.List; l != nil; l = l.Next {
|
||||
r = l.N
|
||||
r := l.N
|
||||
|
||||
if r.Op != OKEY {
|
||||
Fatalf("maplit: rhs not OKEY: %v", r)
|
||||
}
|
||||
index = r.Left
|
||||
value = r.Right
|
||||
index := r.Left
|
||||
value := r.Right
|
||||
|
||||
if isliteral(index) && isliteral(value) {
|
||||
// build vstat[b].a = key;
|
||||
|
|
@ -981,13 +956,13 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
|||
// for i = 0; i < len(vstat); i++ {
|
||||
// map[vstat[i].a] = vstat[i].b
|
||||
// }
|
||||
index = temp(Types[TINT])
|
||||
index := temp(Types[TINT])
|
||||
|
||||
a = Nod(OINDEX, vstat, index)
|
||||
a.Bounded = true
|
||||
a = Nod(ODOT, a, newname(symb))
|
||||
|
||||
r = Nod(OINDEX, vstat, index)
|
||||
r := Nod(OINDEX, vstat, index)
|
||||
r.Bounded = true
|
||||
r = Nod(ODOT, r, newname(syma))
|
||||
r = Nod(OINDEX, var_, r)
|
||||
|
|
@ -1011,13 +986,13 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
|
|||
|
||||
var val *Node
|
||||
for l := n.List; l != nil; l = l.Next {
|
||||
r = l.N
|
||||
r := l.N
|
||||
|
||||
if r.Op != OKEY {
|
||||
Fatalf("maplit: rhs not OKEY: %v", r)
|
||||
}
|
||||
index = r.Left
|
||||
value = r.Right
|
||||
index := r.Left
|
||||
value := r.Right
|
||||
|
||||
if isliteral(index) && isliteral(value) {
|
||||
continue
|
||||
|
|
@ -1291,9 +1266,8 @@ func initplan(n *Node) {
|
|||
Fatalf("initplan")
|
||||
|
||||
case OARRAYLIT:
|
||||
var a *Node
|
||||
for l := n.List; l != nil; l = l.Next {
|
||||
a = l.N
|
||||
a := l.N
|
||||
if a.Op != OKEY || !Smallintconst(a.Left) {
|
||||
Fatalf("initplan arraylit")
|
||||
}
|
||||
|
|
@ -1301,9 +1275,8 @@ func initplan(n *Node) {
|
|||
}
|
||||
|
||||
case OSTRUCTLIT:
|
||||
var a *Node
|
||||
for l := n.List; l != nil; l = l.Next {
|
||||
a = l.N
|
||||
a := l.N
|
||||
if a.Op != OKEY || a.Left.Type == nil {
|
||||
Fatalf("initplan structlit")
|
||||
}
|
||||
|
|
@ -1311,9 +1284,8 @@ func initplan(n *Node) {
|
|||
}
|
||||
|
||||
case OMAPLIT:
|
||||
var a *Node
|
||||
for l := n.List; l != nil; l = l.Next {
|
||||
a = l.N
|
||||
a := l.N
|
||||
if a.Op != OKEY {
|
||||
Fatalf("initplan maplit")
|
||||
}
|
||||
|
|
@ -1333,13 +1305,11 @@ func addvalue(p *InitPlan, xoffset int64, key *Node, n *Node) {
|
|||
if isvaluelit(n) {
|
||||
initplan(n)
|
||||
q := initplans[n]
|
||||
var e *InitEntry
|
||||
for i := 0; i < len(q.E); i++ {
|
||||
e = entry(p)
|
||||
*e = q.E[i]
|
||||
for _, qe := range q.E {
|
||||
e := entry(p)
|
||||
*e = qe
|
||||
e.Xoffset += xoffset
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -2120,10 +2120,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
|
|||
|
||||
// Set receiver (for interface calls)
|
||||
if rcvr != nil {
|
||||
var argStart int64
|
||||
if HasLinkRegister() {
|
||||
argStart += int64(Widthptr)
|
||||
}
|
||||
argStart := Ctxt.FixedFrameSize()
|
||||
if k != callNormal {
|
||||
argStart += int64(2 * Widthptr)
|
||||
}
|
||||
|
|
@ -3737,6 +3734,12 @@ func (s *genState) genValue(v *ssa.Value) {
|
|||
p.To.Type = obj.TYPE_ADDR
|
||||
p.To.Sym = Linksym(Pkglookup("duffzero", Runtimepkg))
|
||||
p.To.Offset = v.AuxInt
|
||||
case ssa.OpAMD64MOVOconst:
|
||||
if v.AuxInt != 0 {
|
||||
v.Unimplementedf("MOVOconst can only do constant=0")
|
||||
}
|
||||
r := regnum(v)
|
||||
opregreg(x86.AXORPS, r, r)
|
||||
|
||||
case ssa.OpCopy: // TODO: lower to MOVQ earlier?
|
||||
if v.Type.IsMemory() {
|
||||
|
|
|
|||
|
|
@ -59,26 +59,21 @@ func adderr(line int, format string, args ...interface{}) {
})
}

// errcmp sorts errors by line, then seq, then message.
type errcmp []Error

func (x errcmp) Len() int {
return len(x)
}

func (x errcmp) Swap(i, j int) {
x[i], x[j] = x[j], x[i]
}

func (x errcmp) Len() int { return len(x) }
func (x errcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x errcmp) Less(i, j int) bool {
a := &x[i]
b := &x[j]
if a.lineno != b.lineno {
return a.lineno-b.lineno < 0
return a.lineno < b.lineno
}
if a.seq != b.seq {
return a.seq-b.seq < 0
return a.seq < b.seq
}
return stringsCompare(a.msg, b.msg) < 0
return a.msg < b.msg
}

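Besides reading better, the switch in the Less method above from subtraction to direct comparison avoids a classic trap: a-b < 0 is not a safe way to compare fixed-width integers, because the subtraction can overflow. A contrived illustration with int32 (the exact field widths in Error are beside the point):

package main

import "fmt"

func main() {
	var a, b int32 = -2000000000, 2000000000
	fmt.Println(a < b)   // true: the direct comparison is always correct
	fmt.Println(a-b < 0) // false: a-b overflows int32 and wraps to a positive value
}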
func Flusherrors() {
@ -86,7 +81,7 @@ func Flusherrors() {
|
|||
if len(errors) == 0 {
|
||||
return
|
||||
}
|
||||
sort.Sort(errcmp(errors[:len(errors)]))
|
||||
sort.Sort(errcmp(errors))
|
||||
for i := 0; i < len(errors); i++ {
|
||||
if i == 0 || errors[i].msg != errors[i-1].msg {
|
||||
fmt.Printf("%s", errors[i].msg)
|
||||
|
|
@ -127,7 +122,7 @@ func Yyerror(format string, args ...interface{}) {
|
|||
|
||||
// An unexpected EOF caused a syntax error. Use the previous
|
||||
// line number since getc generated a fake newline character.
|
||||
if curio.eofnl != 0 {
|
||||
if curio.eofnl {
|
||||
lexlineno = prevlineno
|
||||
}
|
||||
|
||||
|
|
@ -352,23 +347,6 @@ func importdot(opkg *Pkg, pack *Node) {
|
|||
}
|
||||
}
|
||||
|
||||
func gethunk() {
|
||||
nh := int32(NHUNK)
|
||||
if thunk >= 10*NHUNK {
|
||||
nh = 10 * NHUNK
|
||||
}
|
||||
h := string(make([]byte, nh))
|
||||
if h == "" {
|
||||
Flusherrors()
|
||||
Yyerror("out of memory")
|
||||
errorexit()
|
||||
}
|
||||
|
||||
hunk = h
|
||||
nhunk = nh
|
||||
thunk += nh
|
||||
}
|
||||
|
||||
func Nod(op int, nleft *Node, nright *Node) *Node {
|
||||
n := new(Node)
|
||||
n.Op = uint8(op)
|
||||
|
|
@ -612,16 +590,11 @@ func typ(et int) *Type {
|
|||
return t
|
||||
}
|
||||
|
||||
// methcmp sorts by symbol, then by package path for unexported symbols.
|
||||
type methcmp []*Type
|
||||
|
||||
func (x methcmp) Len() int {
|
||||
return len(x)
|
||||
}
|
||||
|
||||
func (x methcmp) Swap(i, j int) {
|
||||
x[i], x[j] = x[j], x[i]
|
||||
}
|
||||
|
||||
func (x methcmp) Len() int { return len(x) }
|
||||
func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x methcmp) Less(i, j int) bool {
|
||||
a := x[i]
|
||||
b := x[j]
|
||||
|
|
@ -632,16 +605,14 @@ func (x methcmp) Less(i, j int) bool {
|
|||
return true
|
||||
}
|
||||
if b.Sym == nil {
|
||||
return 1 < 0
|
||||
return false
|
||||
}
|
||||
k := stringsCompare(a.Sym.Name, b.Sym.Name)
|
||||
if k != 0 {
|
||||
return k < 0
|
||||
if a.Sym.Name != b.Sym.Name {
|
||||
return a.Sym.Name < b.Sym.Name
|
||||
}
|
||||
if !exportname(a.Sym.Name) {
|
||||
k := stringsCompare(a.Sym.Pkg.Path, b.Sym.Pkg.Path)
|
||||
if k != 0 {
|
||||
return k < 0
|
||||
if a.Sym.Pkg.Path != b.Sym.Pkg.Path {
|
||||
return a.Sym.Pkg.Path < b.Sym.Pkg.Path
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -653,24 +624,19 @@ func sortinter(t *Type) *Type {
|
|||
return t
|
||||
}
|
||||
|
||||
i := 0
|
||||
var a []*Type
|
||||
for f := t.Type; f != nil; f = f.Down {
|
||||
i++
|
||||
}
|
||||
a := make([]*Type, i)
|
||||
i = 0
|
||||
var f *Type
|
||||
for f = t.Type; f != nil; f = f.Down {
|
||||
a[i] = f
|
||||
i++
|
||||
}
|
||||
sort.Sort(methcmp(a[:i]))
|
||||
for i--; i >= 0; i-- {
|
||||
a[i].Down = f
|
||||
f = a[i]
|
||||
a = append(a, f)
|
||||
}
|
||||
sort.Sort(methcmp(a))
|
||||
|
||||
t.Type = f
|
||||
n := len(a) // n > 0 due to initial conditions.
|
||||
for i := 0; i < n-1; i++ {
|
||||
a[i].Down = a[i+1]
|
||||
}
|
||||
a[n-1].Down = nil
|
||||
|
||||
t.Type = a[0]
|
||||
return t
|
||||
}
|
||||
|
||||
|
|
@ -1618,34 +1584,33 @@ func Ptrto(t *Type) *Type {
|
|||
}
|
||||
|
||||
func frame(context int) {
|
||||
var l *NodeList
|
||||
|
||||
if context != 0 {
|
||||
fmt.Printf("--- external frame ---\n")
|
||||
l = externdcl
|
||||
} else if Curfn != nil {
|
||||
fmt.Printf("--- %v frame ---\n", Curfn.Func.Nname.Sym)
|
||||
l = Curfn.Func.Dcl
|
||||
} else {
|
||||
for _, n := range externdcl {
|
||||
printframenode(n)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var n *Node
|
||||
var w int64
|
||||
for ; l != nil; l = l.Next {
|
||||
n = l.N
|
||||
w = -1
|
||||
if Curfn != nil {
|
||||
fmt.Printf("--- %v frame ---\n", Curfn.Func.Nname.Sym)
|
||||
for l := Curfn.Func.Dcl; l != nil; l = l.Next {
|
||||
printframenode(l.N)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func printframenode(n *Node) {
|
||||
w := int64(-1)
|
||||
if n.Type != nil {
|
||||
w = n.Type.Width
|
||||
}
|
||||
switch n.Op {
|
||||
case ONAME:
|
||||
fmt.Printf("%v %v G%d %v width=%d\n", Oconv(int(n.Op), 0), n.Sym, n.Name.Vargen, n.Type, w)
|
||||
|
||||
case OTYPE:
|
||||
fmt.Printf("%v %v width=%d\n", Oconv(int(n.Op), 0), n.Type, w)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -1983,19 +1948,6 @@ func cheapexpr(n *Node, init **NodeList) *Node {
|
|||
return copyexpr(n, n.Type, init)
|
||||
}
|
||||
|
||||
/*
|
||||
* return n in a local variable of type t if it is not already.
|
||||
* the value is guaranteed not to change except by direct
|
||||
* assignment to it.
|
||||
*/
|
||||
func localexpr(n *Node, t *Type, init **NodeList) *Node {
|
||||
if n.Op == ONAME && (!n.Addrtaken || strings.HasPrefix(n.Sym.Name, "autotmp_")) && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && convertop(n.Type, t, nil) == OCONVNOP {
|
||||
return n
|
||||
}
|
||||
|
||||
return copyexpr(n, t, init)
|
||||
}
|
||||
|
||||
func Setmaxarg(t *Type, extra int32) {
|
||||
dowidth(t)
|
||||
w := t.Argwid
|
||||
|
|
@ -2163,17 +2115,17 @@ func adddot(n *Node) *Node {
|
|||
*/
|
||||
type Symlink struct {
|
||||
field *Type
|
||||
good uint8
|
||||
followptr uint8
|
||||
link *Symlink
|
||||
good bool
|
||||
followptr bool
|
||||
}
|
||||
|
||||
var slist *Symlink
|
||||
|
||||
func expand0(t *Type, followptr int) {
|
||||
func expand0(t *Type, followptr bool) {
|
||||
u := t
|
||||
if Isptr[u.Etype] {
|
||||
followptr = 1
|
||||
followptr = true
|
||||
u = u.Type
|
||||
}
|
||||
|
||||
|
|
@ -2187,7 +2139,7 @@ func expand0(t *Type, followptr int) {
|
|||
sl = new(Symlink)
|
||||
sl.field = f
|
||||
sl.link = slist
|
||||
sl.followptr = uint8(followptr)
|
||||
sl.followptr = followptr
|
||||
slist = sl
|
||||
}
|
||||
|
||||
|
|
@ -2205,13 +2157,13 @@ func expand0(t *Type, followptr int) {
|
|||
sl = new(Symlink)
|
||||
sl.field = f
|
||||
sl.link = slist
|
||||
sl.followptr = uint8(followptr)
|
||||
sl.followptr = followptr
|
||||
slist = sl
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func expand1(t *Type, d int, followptr int) {
|
||||
func expand1(t *Type, d int, followptr bool) {
|
||||
if t.Trecur != 0 {
|
||||
return
|
||||
}
|
||||
|
|
@ -2226,7 +2178,7 @@ func expand1(t *Type, d int, followptr int) {
|
|||
|
||||
u := t
|
||||
if Isptr[u.Etype] {
|
||||
followptr = 1
|
||||
followptr = true
|
||||
u = u.Type
|
||||
}
|
||||
|
||||
|
|
@ -2263,7 +2215,7 @@ func expandmeth(t *Type) {
|
|||
// generate all reachable methods
|
||||
slist = nil
|
||||
|
||||
expand1(t, len(dotlist)-1, 0)
|
||||
expand1(t, len(dotlist)-1, false)
|
||||
|
||||
// check each method to be uniquely reachable
|
||||
var c int
|
||||
|
|
@ -2278,7 +2230,7 @@ func expandmeth(t *Type) {
|
|||
if c == 1 {
|
||||
// addot1 may have dug out arbitrary fields, we only want methods.
|
||||
if f.Type.Etype == TFUNC && f.Type.Thistuple > 0 {
|
||||
sl.good = 1
|
||||
sl.good = true
|
||||
sl.field = f
|
||||
}
|
||||
}
|
||||
|
|
@ -2293,13 +2245,13 @@ func expandmeth(t *Type) {
|
|||
|
||||
t.Xmethod = t.Method
|
||||
for sl := slist; sl != nil; sl = sl.link {
|
||||
if sl.good != 0 {
|
||||
if sl.good {
|
||||
// add it to the base type method list
|
||||
f = typ(TFIELD)
|
||||
|
||||
*f = *sl.field
|
||||
f.Embedded = 1 // needs a trampoline
|
||||
if sl.followptr != 0 {
|
||||
if sl.followptr {
|
||||
f.Embedded = 2
|
||||
}
|
||||
f.Down = t.Xmethod
|
||||
|
|
@ -2616,21 +2568,6 @@ func genhash(sym *Sym, t *Type) {
|
|||
colasdefn(n.List, n)
|
||||
ni = n.List.N
|
||||
|
||||
// TODO: with aeshash we don't need these shift/mul parts
|
||||
|
||||
// h = h<<3 | h>>61
|
||||
n.Nbody = list(n.Nbody, Nod(OAS, nh, Nod(OOR, Nod(OLSH, nh, Nodintconst(3)), Nod(ORSH, nh, Nodintconst(int64(Widthptr)*8-3)))))
|
||||
|
||||
// h *= mul
|
||||
// Same multipliers as in runtime.memhash.
|
||||
var mul int64
|
||||
if Widthptr == 4 {
|
||||
mul = 3267000013
|
||||
} else {
|
||||
mul = 23344194077549503
|
||||
}
|
||||
n.Nbody = list(n.Nbody, Nod(OAS, nh, Nod(OMUL, nh, Nodintconst(mul))))
|
||||
|
||||
// h = hashel(&p[i], h)
|
||||
call := Nod(OCALL, hashel, nil)
|
||||
|
||||
|
|
@ -2968,8 +2905,8 @@ func geneq(sym *Sym, t *Type) {
|
|||
safemode = old_safemode
|
||||
}
|
||||
|
||||
func ifacelookdot(s *Sym, t *Type, followptr *int, ignorecase int) *Type {
|
||||
*followptr = 0
|
||||
func ifacelookdot(s *Sym, t *Type, followptr *bool, ignorecase int) *Type {
|
||||
*followptr = false
|
||||
|
||||
if t == nil {
|
||||
return nil
|
||||
|
|
@ -2988,7 +2925,7 @@ func ifacelookdot(s *Sym, t *Type, followptr *int, ignorecase int) *Type {
|
|||
if c == 1 {
|
||||
for i = 0; i < d; i++ {
|
||||
if Isptr[dotlist[i].field.Type.Etype] {
|
||||
*followptr = 1
|
||||
*followptr = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
|
@ -3046,9 +2983,12 @@ func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool
|
|||
}
|
||||
var tm *Type
|
||||
var imtype *Type
|
||||
var followptr int
|
||||
var followptr bool
|
||||
var rcvr *Type
|
||||
for im := iface.Type; im != nil; im = im.Down {
|
||||
if im.Broke {
|
||||
continue
|
||||
}
|
||||
imtype = methodfunc(im.Type, nil)
|
||||
tm = ifacelookdot(im.Sym, t, &followptr, 0)
|
||||
if tm == nil || tm.Nointerface || !Eqtype(methodfunc(tm.Type, nil), imtype) {
|
||||
|
|
@ -3065,7 +3005,7 @@ func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool
|
|||
// the method does not exist for value types.
|
||||
rcvr = getthisx(tm.Type).Type.Type
|
||||
|
||||
if Isptr[rcvr.Etype] && !Isptr[t0.Etype] && followptr == 0 && !isifacemethod(tm.Type) {
|
||||
if Isptr[rcvr.Etype] && !Isptr[t0.Etype] && !followptr && !isifacemethod(tm.Type) {
|
||||
if false && Debug['r'] != 0 {
|
||||
Yyerror("interface pointer mismatch")
|
||||
}
|
||||
|
|
@ -3420,7 +3360,6 @@ func ngotype(n *Node) *Sym {
|
|||
* only in the last segment of the path, and it makes for happier
|
||||
* users if we escape that as little as possible.
|
||||
*
|
||||
* If you edit this, edit ../ld/lib.c:/^pathtoprefix too.
|
||||
* If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
|
||||
*/
|
||||
func pathtoprefix(s string) string {
|
||||
|
|
@ -3492,17 +3431,13 @@ func isbadimport(path string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
for i := 0; i < len(reservedimports); i++ {
|
||||
if path == reservedimports[i] {
|
||||
for _, ri := range reservedimports {
|
||||
if path == ri {
|
||||
Yyerror("import path %q is reserved and cannot be used", path)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
var s string
|
||||
_ = s
|
||||
var r uint
|
||||
_ = r
|
||||
for _, r := range path {
|
||||
if r == utf8.RuneError {
|
||||
Yyerror("import path contains invalid UTF-8 sequence: %q", path)
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ package gc
|
|||
|
||||
import (
|
||||
"cmd/internal/obj"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
|
@ -779,7 +778,13 @@ func exprcmp(c1, c2 *caseClause) int {
|
|||
if len(a) > len(b) {
|
||||
return +1
|
||||
}
|
||||
return stringsCompare(a, b)
|
||||
if a == b {
|
||||
return 0
|
||||
}
|
||||
if a < b {
|
||||
return -1
|
||||
}
|
||||
return +1
|
||||
}
|
||||
|
||||
return 0
|
||||
|
|
@ -806,43 +811,3 @@ func (x caseClauseByType) Less(i, j int) bool {
|
|||
// sort by ordinal
|
||||
return c1.ordinal < c2.ordinal
|
||||
}
|
||||
|
||||
func dumpcase(cc []*caseClause) {
|
||||
for _, c := range cc {
|
||||
switch c.typ {
|
||||
case caseKindDefault:
|
||||
fmt.Printf("case-default\n")
|
||||
fmt.Printf("\tord=%d\n", c.ordinal)
|
||||
|
||||
case caseKindExprConst:
|
||||
fmt.Printf("case-exprconst\n")
|
||||
fmt.Printf("\tord=%d\n", c.ordinal)
|
||||
|
||||
case caseKindExprVar:
|
||||
fmt.Printf("case-exprvar\n")
|
||||
fmt.Printf("\tord=%d\n", c.ordinal)
|
||||
fmt.Printf("\top=%v\n", Oconv(int(c.node.Left.Op), 0))
|
||||
|
||||
case caseKindTypeNil:
|
||||
fmt.Printf("case-typenil\n")
|
||||
fmt.Printf("\tord=%d\n", c.ordinal)
|
||||
|
||||
case caseKindTypeConst:
|
||||
fmt.Printf("case-typeconst\n")
|
||||
fmt.Printf("\tord=%d\n", c.ordinal)
|
||||
fmt.Printf("\thash=%x\n", c.hash)
|
||||
|
||||
case caseKindTypeVar:
|
||||
fmt.Printf("case-typevar\n")
|
||||
fmt.Printf("\tord=%d\n", c.ordinal)
|
||||
|
||||
default:
|
||||
fmt.Printf("case-???\n")
|
||||
fmt.Printf("\tord=%d\n", c.ordinal)
|
||||
fmt.Printf("\top=%v\n", Oconv(int(c.node.Left.Op), 0))
|
||||
fmt.Printf("\thash=%x\n", c.hash)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
|
|

src/cmd/compile/internal/gc/swt_test.go (new file, 144 lines)
@ -0,0 +1,144 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gc
|
||||
|
||||
import (
|
||||
"cmd/compile/internal/big"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExprcmp(t *testing.T) {
|
||||
testdata := []struct {
|
||||
a, b caseClause
|
||||
want int
|
||||
}{
|
||||
// Non-constants.
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nil, nil), typ: caseKindExprVar},
|
||||
caseClause{node: Nod(OXXX, nil, nil), typ: caseKindExprConst},
|
||||
+1,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nil, nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nil, nil), typ: caseKindExprVar},
|
||||
-1,
|
||||
},
|
||||
// Type switches
|
||||
{
|
||||
caseClause{node: Nod(OXXX, Nodintconst(0), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, Nodbool(true), nil), typ: caseKindExprConst},
|
||||
-1,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, Nodbool(true), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, Nodintconst(1), nil), typ: caseKindExprConst},
|
||||
+1,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, &Node{Type: &Type{Etype: TBOOL, Vargen: 1}}, nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, &Node{Type: &Type{Etype: TINT, Vargen: 0}}, nil), typ: caseKindExprConst},
|
||||
+1,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, &Node{Type: &Type{Etype: TBOOL, Vargen: 1}}, nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, &Node{Type: &Type{Etype: TINT, Vargen: 1}}, nil), typ: caseKindExprConst},
|
||||
-1,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, &Node{Type: &Type{Etype: TBOOL, Vargen: 0}}, nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, &Node{Type: &Type{Etype: TINT, Vargen: 1}}, nil), typ: caseKindExprConst},
|
||||
-1,
|
||||
},
|
||||
// Constant values.
|
||||
// CTFLT
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{&Mpflt{Val: *big.NewFloat(0.1)}}), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{&Mpflt{Val: *big.NewFloat(0.2)}}), nil), typ: caseKindExprConst},
|
||||
-1,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{&Mpflt{Val: *big.NewFloat(0.1)}}), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{&Mpflt{Val: *big.NewFloat(0.1)}}), nil), typ: caseKindExprConst},
|
||||
0,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{&Mpflt{Val: *big.NewFloat(0.2)}}), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{&Mpflt{Val: *big.NewFloat(0.1)}}), nil), typ: caseKindExprConst},
|
||||
+1,
|
||||
},
|
||||
// CTINT
|
||||
{
|
||||
caseClause{node: Nod(OXXX, Nodintconst(0), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, Nodintconst(1), nil), typ: caseKindExprConst},
|
||||
-1,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, Nodintconst(1), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, Nodintconst(1), nil), typ: caseKindExprConst},
|
||||
0,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, Nodintconst(1), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, Nodintconst(0), nil), typ: caseKindExprConst},
|
||||
+1,
|
||||
},
|
||||
// CTRUNE
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{&Mpint{Val: *big.NewInt('a'), Rune: true}}), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{&Mpint{Val: *big.NewInt('b'), Rune: true}}), nil), typ: caseKindExprConst},
|
||||
-1,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{&Mpint{Val: *big.NewInt('b'), Rune: true}}), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{&Mpint{Val: *big.NewInt('b'), Rune: true}}), nil), typ: caseKindExprConst},
|
||||
0,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{&Mpint{Val: *big.NewInt('b'), Rune: true}}), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{&Mpint{Val: *big.NewInt('a'), Rune: true}}), nil), typ: caseKindExprConst},
|
||||
+1,
|
||||
},
|
||||
// CTSTR
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{"ab"}), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{"abc"}), nil), typ: caseKindExprConst},
|
||||
-1,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{"abc"}), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{"xyz"}), nil), typ: caseKindExprConst},
|
||||
-1,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{"abc"}), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{"abc"}), nil), typ: caseKindExprConst},
|
||||
0,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{"abc"}), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{"ab"}), nil), typ: caseKindExprConst},
|
||||
+1,
|
||||
},
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{"xyz"}), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nodlit(Val{"abc"}), nil), typ: caseKindExprConst},
|
||||
+1,
|
||||
},
|
||||
// Everything else should compare equal.
|
||||
{
|
||||
caseClause{node: Nod(OXXX, nodnil(), nil), typ: caseKindExprConst},
|
||||
caseClause{node: Nod(OXXX, nodnil(), nil), typ: caseKindExprConst},
|
||||
0,
|
||||
},
|
||||
}
|
||||
for i, d := range testdata {
|
||||
got := exprcmp(&d.a, &d.b)
|
||||
if d.want != got {
|
||||
t.Errorf("%d: exprcmp(a, b) = %d; want %d", i, got, d.want)
|
||||
t.Logf("\ta = caseClause{node: %#v, typ: %#v}", d.a.node, d.a.typ)
|
||||
t.Logf("\tb = caseClause{node: %#v, typ: %#v}", d.b.node, d.b.typ)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -409,9 +409,10 @@ func list(l *NodeList, n *Node) *NodeList {
return concat(l, list1(n))
}

// listsort sorts *l in place according to the 3-way comparison function f.
// listsort sorts *l in place according to the comparison function lt.
// The algorithm expects lt(a, b) to be equivalent to a < b.
// The algorithm is mergesort, so it is guaranteed to be O(n log n).
func listsort(l **NodeList, f func(*Node, *Node) int) {
func listsort(l **NodeList, lt func(*Node, *Node) bool) {
if *l == nil || (*l).Next == nil {
return
}

@ -436,10 +437,10 @@ func listsort(l **NodeList, f func(*Node, *Node) int) {
(*l).End = l1

l1 = *l
listsort(&l1, f)
listsort(&l2, f)
listsort(&l1, lt)
listsort(&l2, lt)

if f(l1.N, l2.N) < 0 {
if lt(l1.N, l2.N) {
*l = l1
} else {
*l = l2

@ -451,7 +452,7 @@ func listsort(l **NodeList, f func(*Node, *Node) int) {

var le *NodeList
for (l1 != nil) && (l2 != nil) {
for (l1.Next != nil) && f(l1.Next.N, l2.N) < 0 {
for (l1.Next != nil) && lt(l1.Next.N, l2.N) {
l1 = l1.Next
}

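The listsort change above only swaps the 3-way comparison function for a boolean less function; the mergesort structure itself is unchanged. A compact, self-contained sketch of the same technique on a hypothetical singly linked list (node and lt are illustrative names, not the compiler's types):

package main

import "fmt"

type node struct {
	val  int
	next *node
}

// mergesort sorts a singly linked list; lt(a, b) must behave like a < b,
// matching the contract documented for listsort above.
func mergesort(l *node, lt func(a, b int) bool) *node {
	if l == nil || l.next == nil {
		return l
	}
	// Split the list in half with a slow/fast pointer walk.
	slow, fast := l, l.next
	for fast != nil && fast.next != nil {
		slow, fast = slow.next, fast.next.next
	}
	mid := slow.next
	slow.next = nil

	a, b := mergesort(l, lt), mergesort(mid, lt)

	// Merge the two sorted halves, reusing the existing nodes.
	var head *node
	tail := &head
	for a != nil && b != nil {
		if lt(a.val, b.val) {
			*tail = a
			tail = &a.next
			a = a.next
		} else {
			*tail = b
			tail = &b.next
			b = b.next
		}
	}
	if a != nil {
		*tail = a
	} else {
		*tail = b
	}
	return head
}

func main() {
	var l *node
	for _, v := range []int{3, 1, 4, 1, 5} {
		l = &node{val: v, next: l}
	}
	l = mergesort(l, func(a, b int) bool { return a < b })
	for n := l; n != nil; n = n.next {
		fmt.Print(n.val, " ") // 1 1 3 4 5
	}
	fmt.Println()
}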
@ -18,7 +18,7 @@ import (
|
|||
* marks variables that escape the local frame.
|
||||
* rewrites n->op to be more specific in some cases.
|
||||
*/
|
||||
var typecheckdefstack *NodeList
|
||||
var typecheckdefstack []*Node
|
||||
|
||||
/*
|
||||
* resolve ONONAME to definition, if any.
|
||||
|
|
@ -1026,11 +1026,11 @@ OpSwitch:
|
|||
break
|
||||
}
|
||||
|
||||
if Isconst(n.Right, CTINT) {
|
||||
if !n.Bounded && Isconst(n.Right, CTINT) {
|
||||
x := Mpgetfix(n.Right.Val().U.(*Mpint))
|
||||
if x < 0 {
|
||||
Yyerror("invalid %s index %v (index must be non-negative)", why, n.Right)
|
||||
} else if Isfixedarray(t) && t.Bound > 0 && x >= t.Bound {
|
||||
} else if Isfixedarray(t) && x >= t.Bound {
|
||||
Yyerror("invalid array index %v (out of bounds for %d-element array)", n.Right, t.Bound)
|
||||
} else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.Val().U.(string))) {
|
||||
Yyerror("invalid string index %v (out of bounds for %d-byte string)", n.Right, len(n.Left.Val().U.(string)))
|
||||
|
|
@ -1160,16 +1160,16 @@ OpSwitch:
|
|||
}
|
||||
|
||||
lo := n.Right.Left
|
||||
if lo != nil && checksliceindex(l, lo, tp) < 0 {
|
||||
if lo != nil && !checksliceindex(l, lo, tp) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
hi := n.Right.Right
|
||||
if hi != nil && checksliceindex(l, hi, tp) < 0 {
|
||||
if hi != nil && !checksliceindex(l, hi, tp) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
if checksliceconst(lo, hi) < 0 {
|
||||
if !checksliceconst(lo, hi) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
|
|
@ -1227,21 +1227,21 @@ OpSwitch:
|
|||
}
|
||||
|
||||
lo := n.Right.Left
|
||||
if lo != nil && checksliceindex(l, lo, tp) < 0 {
|
||||
if lo != nil && !checksliceindex(l, lo, tp) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
mid := n.Right.Right.Left
|
||||
if mid != nil && checksliceindex(l, mid, tp) < 0 {
|
||||
if mid != nil && !checksliceindex(l, mid, tp) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
hi := n.Right.Right.Right
|
||||
if hi != nil && checksliceindex(l, hi, tp) < 0 {
|
||||
if hi != nil && !checksliceindex(l, hi, tp) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
if checksliceconst(lo, hi) < 0 || checksliceconst(lo, mid) < 0 || checksliceconst(mid, hi) < 0 {
|
||||
if !checksliceconst(lo, hi) || !checksliceconst(lo, mid) || !checksliceconst(mid, hi) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
|
|
@ -1300,7 +1300,7 @@ OpSwitch:
|
|||
|
||||
n.Op = OCONV
|
||||
n.Type = l.Type
|
||||
if onearg(n, "conversion to %v", l.Type) < 0 {
|
||||
if !onearg(n, "conversion to %v", l.Type) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
|
|
@ -1388,7 +1388,7 @@ OpSwitch:
|
|||
|
||||
case OCAP, OLEN, OREAL, OIMAG:
|
||||
ok |= Erv
|
||||
if onearg(n, "%v", Oconv(int(n.Op), 0)) < 0 {
|
||||
if !onearg(n, "%v", Oconv(int(n.Op), 0)) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
|
|
@ -1484,7 +1484,7 @@ OpSwitch:
|
|||
l = t.Nname
|
||||
r = t.Down.Nname
|
||||
} else {
|
||||
if twoarg(n) < 0 {
|
||||
if !twoarg(n) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
|
|
@ -1538,7 +1538,7 @@ OpSwitch:
|
|||
break OpSwitch
|
||||
|
||||
case OCLOSE:
|
||||
if onearg(n, "%v", Oconv(int(n.Op), 0)) < 0 {
|
||||
if !onearg(n, "%v", Oconv(int(n.Op), 0)) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
|
|
@ -1837,9 +1837,7 @@ OpSwitch:
|
|||
n.Type = nil
|
||||
return
|
||||
}
|
||||
et := obj.Bool2int(checkmake(t, "len", l) < 0)
|
||||
et |= obj.Bool2int(r != nil && checkmake(t, "cap", r) < 0)
|
||||
if et != 0 {
|
||||
if !checkmake(t, "len", l) || r != nil && !checkmake(t, "cap", r) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
|
|
@ -1863,7 +1861,7 @@ OpSwitch:
|
|||
n.Type = nil
|
||||
return
|
||||
}
|
||||
if checkmake(t, "size", l) < 0 {
|
||||
if !checkmake(t, "size", l) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
|
|
@ -1884,7 +1882,7 @@ OpSwitch:
|
|||
n.Type = nil
|
||||
return
|
||||
}
|
||||
if checkmake(t, "buffer", l) < 0 {
|
||||
if !checkmake(t, "buffer", l) {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
|
|
@ -1947,7 +1945,7 @@ OpSwitch:
|
|||
|
||||
case OPANIC:
|
||||
ok |= Etop
|
||||
if onearg(n, "panic") < 0 {
|
||||
if !onearg(n, "panic") {
|
||||
n.Type = nil
|
||||
return
|
||||
}
|
||||
|
|
@ -2228,42 +2226,42 @@ OpSwitch:
|
|||
*/
|
||||
}
|
||||
|
||||
func checksliceindex(l *Node, r *Node, tp *Type) int {
|
||||
func checksliceindex(l *Node, r *Node, tp *Type) bool {
|
||||
t := r.Type
|
||||
if t == nil {
|
||||
return -1
|
||||
return false
|
||||
}
|
||||
if !Isint[t.Etype] {
|
||||
Yyerror("invalid slice index %v (type %v)", r, t)
|
||||
return -1
|
||||
return false
|
||||
}
|
||||
|
||||
if r.Op == OLITERAL {
|
||||
if Mpgetfix(r.Val().U.(*Mpint)) < 0 {
|
||||
Yyerror("invalid slice index %v (index must be non-negative)", r)
|
||||
return -1
|
||||
return false
|
||||
} else if tp != nil && tp.Bound > 0 && Mpgetfix(r.Val().U.(*Mpint)) > tp.Bound {
|
||||
Yyerror("invalid slice index %v (out of bounds for %d-element array)", r, tp.Bound)
|
||||
return -1
|
||||
return false
|
||||
} else if Isconst(l, CTSTR) && Mpgetfix(r.Val().U.(*Mpint)) > int64(len(l.Val().U.(string))) {
|
||||
Yyerror("invalid slice index %v (out of bounds for %d-byte string)", r, len(l.Val().U.(string)))
|
||||
return -1
|
||||
return false
|
||||
} else if Mpcmpfixfix(r.Val().U.(*Mpint), Maxintval[TINT]) > 0 {
|
||||
Yyerror("invalid slice index %v (index too large)", r)
|
||||
return -1
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return 0
|
||||
return true
|
||||
}
|
||||
|
||||
func checksliceconst(lo *Node, hi *Node) int {
|
||||
func checksliceconst(lo *Node, hi *Node) bool {
|
||||
if lo != nil && hi != nil && lo.Op == OLITERAL && hi.Op == OLITERAL && Mpcmpfixfix(lo.Val().U.(*Mpint), hi.Val().U.(*Mpint)) > 0 {
|
||||
Yyerror("invalid slice index: %v > %v", lo, hi)
|
||||
return -1
|
||||
return false
|
||||
}
|
||||
|
||||
return 0
|
||||
return true
|
||||
}
|
||||
|
||||
func checkdefergo(n *Node) {
|
||||
|
|
@ -2341,14 +2339,14 @@ func implicitstar(nn **Node) {
|
|||
*nn = n
|
||||
}
|
||||
|
||||
func onearg(n *Node, f string, args ...interface{}) int {
|
||||
func onearg(n *Node, f string, args ...interface{}) bool {
|
||||
if n.Left != nil {
|
||||
return 0
|
||||
return true
|
||||
}
|
||||
if n.List == nil {
|
||||
p := fmt.Sprintf(f, args...)
|
||||
Yyerror("missing argument to %s: %v", p, n)
|
||||
return -1
|
||||
return false
|
||||
}
|
||||
|
||||
if n.List.Next != nil {
|
||||
|
|
@ -2356,39 +2354,39 @@ func onearg(n *Node, f string, args ...interface{}) int {
|
|||
Yyerror("too many arguments to %s: %v", p, n)
|
||||
n.Left = n.List.N
|
||||
n.List = nil
|
||||
return -1
|
||||
return false
|
||||
}
|
||||
|
||||
n.Left = n.List.N
|
||||
n.List = nil
|
||||
return 0
|
||||
return true
|
||||
}
|
||||
|
||||
func twoarg(n *Node) int {
|
||||
func twoarg(n *Node) bool {
|
||||
if n.Left != nil {
|
||||
return 0
|
||||
return true
|
||||
}
|
||||
if n.List == nil {
|
||||
Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), n)
|
||||
return -1
|
||||
return false
|
||||
}
|
||||
|
||||
n.Left = n.List.N
|
||||
if n.List.Next == nil {
|
||||
Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), n)
|
||||
n.List = nil
|
||||
return -1
|
||||
return false
|
||||
}
|
||||
|
||||
if n.List.Next.Next != nil {
|
||||
Yyerror("too many arguments to %v - %v", Oconv(int(n.Op), 0), n)
|
||||
n.List = nil
|
||||
return -1
|
||||
return false
|
||||
}
|
||||
|
||||
n.Right = n.List.Next.N
|
||||
n.List = nil
|
||||
return 0
|
||||
return true
|
||||
}
|
||||
|
||||
func lookdot1(errnode *Node, s *Sym, t *Type, f *Type, dostrcmp int) *Type {
|
||||
|
|
@ -2849,20 +2847,25 @@ func keydup(n *Node, hash map[uint32][]*Node) {
|
|||
for _, a := range hash[h] {
|
||||
cmp.Op = OEQ
|
||||
cmp.Left = n
|
||||
b := uint32(0)
|
||||
b := false
|
||||
if a.Op == OCONVIFACE && orign.Op == OCONVIFACE {
|
||||
if Eqtype(a.Left.Type, n.Type) {
|
||||
cmp.Right = a.Left
|
||||
evconst(&cmp)
|
||||
b = uint32(obj.Bool2int(cmp.Val().U.(bool)))
|
||||
if cmp.Op == OLITERAL {
|
||||
// Sometimes evconst fails. See issue 12536.
|
||||
b = cmp.Val().U.(bool)
|
||||
}
|
||||
}
|
||||
} else if Eqtype(a.Type, n.Type) {
|
||||
cmp.Right = a
|
||||
evconst(&cmp)
|
||||
b = uint32(obj.Bool2int(cmp.Val().U.(bool)))
|
||||
if cmp.Op == OLITERAL {
|
||||
b = cmp.Val().U.(bool)
|
||||
}
|
||||
}
|
||||
|
||||
if b != 0 {
|
||||
if b {
|
||||
Yyerror("duplicate key %v in map literal", n)
|
||||
return
|
||||
}
|
||||
|
|
@ -3539,7 +3542,7 @@ var mapqueue *NodeList
|
|||
func copytype(n *Node, t *Type) {
|
||||
if t.Etype == TFORW {
|
||||
// This type isn't computed yet; when it is, update n.
|
||||
t.Copyto = list(t.Copyto, n)
|
||||
t.Copyto = append(t.Copyto, n)
|
||||
|
||||
return
|
||||
}
|
||||
|
|
@ -3564,8 +3567,8 @@ func copytype(n *Node, t *Type) {
|
|||
t.Copyto = nil
|
||||
|
||||
// Update nodes waiting on this type.
|
||||
for ; l != nil; l = l.Next {
|
||||
copytype(l.N, t)
|
||||
for _, n := range l {
|
||||
copytype(n, t)
|
||||
}
|
||||
|
||||
// Double-check use of type as embedded type.
|
||||
|
|
@ -3674,16 +3677,13 @@ func typecheckdef(n *Node) *Node {
|
|||
return n
|
||||
}
|
||||
|
||||
l := new(NodeList)
|
||||
l.N = n
|
||||
l.Next = typecheckdefstack
|
||||
typecheckdefstack = l
|
||||
|
||||
typecheckdefstack = append(typecheckdefstack, n)
|
||||
if n.Walkdef == 2 {
|
||||
Flusherrors()
|
||||
fmt.Printf("typecheckdef loop:")
|
||||
for l := typecheckdefstack; l != nil; l = l.Next {
|
||||
fmt.Printf(" %v", l.N.Sym)
|
||||
for i := len(typecheckdefstack) - 1; i >= 0; i-- {
|
||||
n := typecheckdefstack[i]
|
||||
fmt.Printf(" %v", n.Sym)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
Fatalf("typecheckdef loop")
|
||||
|
|
@ -3819,37 +3819,38 @@ ret:
|
|||
if n.Op != OLITERAL && n.Type != nil && isideal(n.Type) {
|
||||
Fatalf("got %v for %v", n.Type, n)
|
||||
}
|
||||
if typecheckdefstack.N != n {
|
||||
last := len(typecheckdefstack) - 1
|
||||
if typecheckdefstack[last] != n {
|
||||
Fatalf("typecheckdefstack mismatch")
|
||||
}
|
||||
l = typecheckdefstack
|
||||
typecheckdefstack = l.Next
|
||||
typecheckdefstack[last] = nil
|
||||
typecheckdefstack = typecheckdefstack[:last]
|
||||
|
||||
lineno = int32(lno)
|
||||
n.Walkdef = 1
|
||||
return n
|
||||
}
|
||||
|
||||
func checkmake(t *Type, arg string, n *Node) int {
|
||||
func checkmake(t *Type, arg string, n *Node) bool {
|
||||
if n.Op == OLITERAL {
|
||||
switch n.Val().Ctype() {
|
||||
case CTINT, CTRUNE, CTFLT, CTCPLX:
|
||||
n.SetVal(toint(n.Val()))
|
||||
if mpcmpfixc(n.Val().U.(*Mpint), 0) < 0 {
|
||||
Yyerror("negative %s argument in make(%v)", arg, t)
|
||||
return -1
|
||||
return false
|
||||
}
|
||||
|
||||
if Mpcmpfixfix(n.Val().U.(*Mpint), Maxintval[TINT]) > 0 {
|
||||
Yyerror("%s argument too large in make(%v)", arg, t)
|
||||
return -1
|
||||
return false
|
||||
}
|
||||
|
||||
// Delay defaultlit until after we've checked range, to avoid
|
||||
// a redundant "constant NNN overflows int" error.
|
||||
defaultlit(&n, Types[TINT])
|
||||
|
||||
return 0
|
||||
return true
|
||||
|
||||
default:
|
||||
break
|
||||
|
|
@ -3858,13 +3859,13 @@ func checkmake(t *Type, arg string, n *Node) int {
|
|||
|
||||
if !Isint[n.Type.Etype] && n.Type.Etype != TIDEAL {
|
||||
Yyerror("non-integer %s argument in make(%v) - %v", arg, t, n.Type)
|
||||
return -1
|
||||
return false
|
||||
}
|
||||
|
||||
// Defaultlit still necessary for non-constant: n might be 1<<k.
|
||||
defaultlit(&n, Types[TINT])
|
||||
|
||||
return 0
|
||||
return true
|
||||
}
|
||||
|
||||
func markbreak(n *Node, implicit *Node) {
|
||||
|
|
|
|||
|
|
@ -5,7 +5,6 @@ import (
|
|||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (n *Node) Line() string {
|
||||
|
|
@ -18,41 +17,6 @@ func atoi(s string) int {
|
|||
return int(n)
|
||||
}
|
||||
|
||||
func isalnum(c int) bool {
|
||||
return isalpha(c) || isdigit(c)
|
||||
}
|
||||
|
||||
func isalpha(c int) bool {
|
||||
return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
|
||||
}
|
||||
|
||||
func isdigit(c int) bool {
|
||||
return '0' <= c && c <= '9'
|
||||
}
|
||||
|
||||
func plan9quote(s string) string {
|
||||
if s == "" {
|
||||
return "'" + strings.Replace(s, "'", "''", -1) + "'"
|
||||
}
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] <= ' ' || s[i] == '\'' {
|
||||
return "'" + strings.Replace(s, "'", "''", -1) + "'"
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// strings.Compare, introduced in Go 1.5.
|
||||
func stringsCompare(a, b string) int {
|
||||
if a == b {
|
||||
return 0
|
||||
}
|
||||
if a < b {
|
||||
return -1
|
||||
}
|
||||
return +1
|
||||
}
|
||||
|
||||
var atExitFuncs []func()
|
||||
|
||||
func AtExit(f func()) {
|
||||
|
|
|
|||
|
|
@@ -1356,7 +1356,7 @@ func walkexpr(np **Node, init **NodeList) {
}

// s + "badgerbadgerbadger" == "badgerbadgerbadger"
if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && cmpslit(n.Right, n.Left.List.Next.N) == 0 {
if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && strlit(n.Right) == strlit(n.Left.List.Next.N) {
r := Nod(int(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
typecheck(&r, Erv)
walkexpr(&r, init)
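The walkexpr rewrite above turns a comparison of the form s+"const" == "const" into a length check on s. A quick standalone check of that equivalence (illustration only, not compiler code):

package main

import "fmt"

func main() {
	const suffix = "badgerbadgerbadger"
	for _, s := range []string{"", "x", "badger"} {
		direct := s+suffix == suffix // the pattern the compiler matches
		viaLen := len(s) == 0        // what it rewrites the comparison to
		fmt.Println(s, direct, viaLen, direct == viaLen) // last column is always true
	}
}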
@@ -2225,8 +2225,6 @@ func needwritebarrier(l *Node, r *Node) bool {

// TODO(rsc): Perhaps componentgen should run before this.

var applywritebarrier_bv Bvec

func applywritebarrier(n *Node, init **NodeList) *Node {
if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) {
if Debug_wb > 1 {
@@ -160,7 +160,7 @@ func fixlbrace(lbr int) {
// set up for another one now that we're done.
// See comment in lex.C about loophack.
if lbr == LBODY {
loophack = 1
loophack = true
}
}
@@ -69,13 +69,10 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+frame+lo+i)
}
// TODO(dfc): https://golang.org/issue/12108
// If DUFFZERO is used inside a tail call (see genwrapper) it will
// overwrite the link register.
} else if false && cnt <= int64(128*gc.Widthptr) {
p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
} else if cnt <= int64(128*gc.Widthptr) {
p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
f := gc.Sysfunc("duffzero")

@@ -83,7 +80,7 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
gc.Afunclit(&p.To, f)
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
} else {
p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)

@@ -443,10 +440,7 @@ func clearfat(nl *gc.Node) {

// The loop leaves R3 on the last zeroed dword
boff = 8
// TODO(dfc): https://golang.org/issue/12108
// If DUFFZERO is used inside a tail call (see genwrapper) it will
// overwrite the link register.
} else if false && q >= 4 {
} else if q >= 4 {
p := gins(ppc64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
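zerorange above dispatches on the byte count: small ranges get unrolled MOVD stores, medium ranges jump into runtime.duffzero at a computed offset, and everything else gets an explicit loop. A rough, simplified model of just that dispatch (thresholds taken from the code above; gc.Widthptr assumed to be 8 on ppc64):

package main

import "fmt"

func zeroStrategy(cnt int64) string {
	const widthptr = 8 // stand-in for gc.Widthptr on ppc64
	switch {
	case cnt < 4*widthptr:
		return "unrolled MOVD stores"
	case cnt <= 128*widthptr:
		return "DUFFZERO (jump into runtime.duffzero at an offset)"
	default:
		return "explicit zeroing loop"
	}
}

func main() {
	for _, n := range []int64{16, 256, 4096} {
		fmt.Println(n, "bytes:", zeroStrategy(n))
	}
}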
@@ -545,30 +545,18 @@ hard:
return
}

func intLiteral(n *gc.Node) (x int64, ok bool) {
switch {
case n == nil:
return
case gc.Isconst(n, gc.CTINT):
return n.Int(), true
case gc.Isconst(n, gc.CTBOOL):
return int64(obj.Bool2int(n.Bool())), true
}
return
}

// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
func gins(as int, f, t *gc.Node) *obj.Prog {
if as >= obj.A_ARCHSPECIFIC {
if x, ok := intLiteral(f); ok {
if x, ok := f.IntLiteral(); ok {
ginscon(as, x, t)
return nil // caller must not use
}
}
if as == ppc64.ACMP || as == ppc64.ACMPU {
if x, ok := intLiteral(t); ok {
if x, ok := t.IntLiteral(); ok {
ginscon2(as, f, x)
return nil // caller must not use
}
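The per-architecture intLiteral helper above is replaced by calls to a Node method (f.IntLiteral(), t.IntLiteral()). The shape of that refactor, reduced to a toy type with hypothetical names:

package main

import "fmt"

type node struct{ lit *int64 }

// Old style: a package-level helper taking the node.
func intLiteral(n *node) (int64, bool) {
	if n == nil || n.lit == nil {
		return 0, false
	}
	return *n.lit, true
}

// New style: the same lookup as a method, so callers write n.IntLiteral().
func (n *node) IntLiteral() (int64, bool) { return intLiteral(n) }

func main() {
	v := int64(42)
	fmt.Println((&node{lit: &v}).IntLiteral())
}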
@@ -183,89 +183,89 @@ func proginfo(p *obj.Prog) {
// for the "base" form of each instruction. On the first call to
// as2variant or variant2as, we'll add the variants to the table.
var varianttable = [ppc64.ALAST][4]int{
ppc64.AADD: [4]int{ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
ppc64.AADDC: [4]int{ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
ppc64.AADDE: [4]int{ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
ppc64.AADDME: [4]int{ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC},
ppc64.AADDZE: [4]int{ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC},
ppc64.AAND: [4]int{ppc64.AAND, ppc64.AANDCC, 0, 0},
ppc64.AANDN: [4]int{ppc64.AANDN, ppc64.AANDNCC, 0, 0},
ppc64.ACNTLZD: [4]int{ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0},
ppc64.ACNTLZW: [4]int{ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0},
ppc64.ADIVD: [4]int{ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC},
ppc64.ADIVDU: [4]int{ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC},
ppc64.ADIVW: [4]int{ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC},
ppc64.ADIVWU: [4]int{ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC},
ppc64.AEQV: [4]int{ppc64.AEQV, ppc64.AEQVCC, 0, 0},
ppc64.AEXTSB: [4]int{ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0},
ppc64.AEXTSH: [4]int{ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0},
ppc64.AEXTSW: [4]int{ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0},
ppc64.AFABS: [4]int{ppc64.AFABS, ppc64.AFABSCC, 0, 0},
ppc64.AFADD: [4]int{ppc64.AFADD, ppc64.AFADDCC, 0, 0},
ppc64.AFADDS: [4]int{ppc64.AFADDS, ppc64.AFADDSCC, 0, 0},
ppc64.AFCFID: [4]int{ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0},
ppc64.AFCTID: [4]int{ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0},
ppc64.AFCTIDZ: [4]int{ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0},
ppc64.AFCTIW: [4]int{ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0},
ppc64.AFCTIWZ: [4]int{ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0},
ppc64.AFDIV: [4]int{ppc64.AFDIV, ppc64.AFDIVCC, 0, 0},
ppc64.AFDIVS: [4]int{ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0},
ppc64.AFMADD: [4]int{ppc64.AFMADD, ppc64.AFMADDCC, 0, 0},
ppc64.AFMADDS: [4]int{ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0},
ppc64.AFMOVD: [4]int{ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0},
ppc64.AFMSUB: [4]int{ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0},
ppc64.AFMSUBS: [4]int{ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0},
ppc64.AFMUL: [4]int{ppc64.AFMUL, ppc64.AFMULCC, 0, 0},
ppc64.AFMULS: [4]int{ppc64.AFMULS, ppc64.AFMULSCC, 0, 0},
ppc64.AFNABS: [4]int{ppc64.AFNABS, ppc64.AFNABSCC, 0, 0},
ppc64.AFNEG: [4]int{ppc64.AFNEG, ppc64.AFNEGCC, 0, 0},
ppc64.AFNMADD: [4]int{ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0},
ppc64.AFNMADDS: [4]int{ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0},
ppc64.AFNMSUB: [4]int{ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0},
ppc64.AFNMSUBS: [4]int{ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0},
ppc64.AFRES: [4]int{ppc64.AFRES, ppc64.AFRESCC, 0, 0},
ppc64.AFRSP: [4]int{ppc64.AFRSP, ppc64.AFRSPCC, 0, 0},
ppc64.AFRSQRTE: [4]int{ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0},
ppc64.AFSEL: [4]int{ppc64.AFSEL, ppc64.AFSELCC, 0, 0},
ppc64.AFSQRT: [4]int{ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0},
ppc64.AFSQRTS: [4]int{ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0},
ppc64.AFSUB: [4]int{ppc64.AFSUB, ppc64.AFSUBCC, 0, 0},
ppc64.AFSUBS: [4]int{ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0},
ppc64.AMTFSB0: [4]int{ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0},
ppc64.AMTFSB1: [4]int{ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0},
ppc64.AMULHD: [4]int{ppc64.AMULHD, ppc64.AMULHDCC, 0, 0},
ppc64.AMULHDU: [4]int{ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0},
ppc64.AMULHW: [4]int{ppc64.AMULHW, ppc64.AMULHWCC, 0, 0},
ppc64.AMULHWU: [4]int{ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0},
ppc64.AMULLD: [4]int{ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC},
ppc64.AMULLW: [4]int{ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC},
ppc64.ANAND: [4]int{ppc64.ANAND, ppc64.ANANDCC, 0, 0},
ppc64.ANEG: [4]int{ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC},
ppc64.ANOR: [4]int{ppc64.ANOR, ppc64.ANORCC, 0, 0},
ppc64.AOR: [4]int{ppc64.AOR, ppc64.AORCC, 0, 0},
ppc64.AORN: [4]int{ppc64.AORN, ppc64.AORNCC, 0, 0},
ppc64.AREM: [4]int{ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC},
ppc64.AREMD: [4]int{ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC},
ppc64.AREMDU: [4]int{ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC},
ppc64.AREMU: [4]int{ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC},
ppc64.ARLDC: [4]int{ppc64.ARLDC, ppc64.ARLDCCC, 0, 0},
ppc64.ARLDCL: [4]int{ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0},
ppc64.ARLDCR: [4]int{ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0},
ppc64.ARLDMI: [4]int{ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0},
ppc64.ARLWMI: [4]int{ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0},
ppc64.ARLWNM: [4]int{ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0},
ppc64.ASLD: [4]int{ppc64.ASLD, ppc64.ASLDCC, 0, 0},
ppc64.ASLW: [4]int{ppc64.ASLW, ppc64.ASLWCC, 0, 0},
ppc64.ASRAD: [4]int{ppc64.ASRAD, ppc64.ASRADCC, 0, 0},
ppc64.ASRAW: [4]int{ppc64.ASRAW, ppc64.ASRAWCC, 0, 0},
ppc64.ASRD: [4]int{ppc64.ASRD, ppc64.ASRDCC, 0, 0},
ppc64.ASRW: [4]int{ppc64.ASRW, ppc64.ASRWCC, 0, 0},
ppc64.ASUB: [4]int{ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC},
ppc64.ASUBC: [4]int{ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC},
ppc64.ASUBE: [4]int{ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC},
ppc64.ASUBME: [4]int{ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC},
ppc64.ASUBZE: [4]int{ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC},
ppc64.AXOR: [4]int{ppc64.AXOR, ppc64.AXORCC, 0, 0},
ppc64.AADD: {ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
ppc64.AADDC: {ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
ppc64.AADDE: {ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
ppc64.AADDME: {ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC},
ppc64.AADDZE: {ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC},
ppc64.AAND: {ppc64.AAND, ppc64.AANDCC, 0, 0},
ppc64.AANDN: {ppc64.AANDN, ppc64.AANDNCC, 0, 0},
ppc64.ACNTLZD: {ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0},
ppc64.ACNTLZW: {ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0},
ppc64.ADIVD: {ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC},
ppc64.ADIVDU: {ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC},
ppc64.ADIVW: {ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC},
ppc64.ADIVWU: {ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC},
ppc64.AEQV: {ppc64.AEQV, ppc64.AEQVCC, 0, 0},
ppc64.AEXTSB: {ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0},
ppc64.AEXTSH: {ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0},
ppc64.AEXTSW: {ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0},
ppc64.AFABS: {ppc64.AFABS, ppc64.AFABSCC, 0, 0},
ppc64.AFADD: {ppc64.AFADD, ppc64.AFADDCC, 0, 0},
ppc64.AFADDS: {ppc64.AFADDS, ppc64.AFADDSCC, 0, 0},
ppc64.AFCFID: {ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0},
ppc64.AFCTID: {ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0},
ppc64.AFCTIDZ: {ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0},
ppc64.AFCTIW: {ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0},
ppc64.AFCTIWZ: {ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0},
ppc64.AFDIV: {ppc64.AFDIV, ppc64.AFDIVCC, 0, 0},
ppc64.AFDIVS: {ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0},
ppc64.AFMADD: {ppc64.AFMADD, ppc64.AFMADDCC, 0, 0},
ppc64.AFMADDS: {ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0},
ppc64.AFMOVD: {ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0},
ppc64.AFMSUB: {ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0},
ppc64.AFMSUBS: {ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0},
ppc64.AFMUL: {ppc64.AFMUL, ppc64.AFMULCC, 0, 0},
ppc64.AFMULS: {ppc64.AFMULS, ppc64.AFMULSCC, 0, 0},
ppc64.AFNABS: {ppc64.AFNABS, ppc64.AFNABSCC, 0, 0},
ppc64.AFNEG: {ppc64.AFNEG, ppc64.AFNEGCC, 0, 0},
ppc64.AFNMADD: {ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0},
ppc64.AFNMADDS: {ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0},
ppc64.AFNMSUB: {ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0},
ppc64.AFNMSUBS: {ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0},
ppc64.AFRES: {ppc64.AFRES, ppc64.AFRESCC, 0, 0},
ppc64.AFRSP: {ppc64.AFRSP, ppc64.AFRSPCC, 0, 0},
ppc64.AFRSQRTE: {ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0},
ppc64.AFSEL: {ppc64.AFSEL, ppc64.AFSELCC, 0, 0},
ppc64.AFSQRT: {ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0},
ppc64.AFSQRTS: {ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0},
ppc64.AFSUB: {ppc64.AFSUB, ppc64.AFSUBCC, 0, 0},
ppc64.AFSUBS: {ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0},
ppc64.AMTFSB0: {ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0},
ppc64.AMTFSB1: {ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0},
ppc64.AMULHD: {ppc64.AMULHD, ppc64.AMULHDCC, 0, 0},
ppc64.AMULHDU: {ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0},
ppc64.AMULHW: {ppc64.AMULHW, ppc64.AMULHWCC, 0, 0},
ppc64.AMULHWU: {ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0},
ppc64.AMULLD: {ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC},
ppc64.AMULLW: {ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC},
ppc64.ANAND: {ppc64.ANAND, ppc64.ANANDCC, 0, 0},
ppc64.ANEG: {ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC},
ppc64.ANOR: {ppc64.ANOR, ppc64.ANORCC, 0, 0},
ppc64.AOR: {ppc64.AOR, ppc64.AORCC, 0, 0},
ppc64.AORN: {ppc64.AORN, ppc64.AORNCC, 0, 0},
ppc64.AREM: {ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC},
ppc64.AREMD: {ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC},
ppc64.AREMDU: {ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC},
ppc64.AREMU: {ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC},
ppc64.ARLDC: {ppc64.ARLDC, ppc64.ARLDCCC, 0, 0},
ppc64.ARLDCL: {ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0},
ppc64.ARLDCR: {ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0},
ppc64.ARLDMI: {ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0},
ppc64.ARLWMI: {ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0},
ppc64.ARLWNM: {ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0},
ppc64.ASLD: {ppc64.ASLD, ppc64.ASLDCC, 0, 0},
ppc64.ASLW: {ppc64.ASLW, ppc64.ASLWCC, 0, 0},
ppc64.ASRAD: {ppc64.ASRAD, ppc64.ASRADCC, 0, 0},
ppc64.ASRAW: {ppc64.ASRAW, ppc64.ASRAWCC, 0, 0},
ppc64.ASRD: {ppc64.ASRD, ppc64.ASRDCC, 0, 0},
ppc64.ASRW: {ppc64.ASRW, ppc64.ASRWCC, 0, 0},
ppc64.ASUB: {ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC},
ppc64.ASUBC: {ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC},
ppc64.ASUBE: {ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC},
ppc64.ASUBME: {ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC},
ppc64.ASUBZE: {ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC},
ppc64.AXOR: {ppc64.AXOR, ppc64.AXORCC, 0, 0},
}

var initvariants_initialized int
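The table above changes only notationally: inside a composite literal of type [ppc64.ALAST][4]int the inner element type may be elided, so [4]int{...} becomes {...}. A minimal standalone illustration of the same rule:

package main

import "fmt"

func main() {
	// Both literals are identical; the element type can be elided inside
	// an array, slice, or map composite literal.
	long := [2][4]int{[4]int{1, 2, 3, 4}, [4]int{5, 6, 7, 8}}
	short := [2][4]int{{1, 2, 3, 4}, {5, 6, 7, 8}}
	fmt.Println(long == short) // true
}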
@@ -111,7 +111,7 @@ func regnames(n *int) []string {

func excludedregs() uint64 {
// Exclude registers with fixed functions
regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS))
regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS) | RtoB(ppc64.REGTMP))

// Also exclude floating point registers with fixed constants
regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
@@ -602,8 +602,10 @@
(MOVQstore destptr (MOVQconst [0]) mem))))

// Medium zeroing uses a duff device.
(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 ->
(DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVQconst [0]) mem)
(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 ->
(Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
(Zero [size] destptr mem) && size <= 1024 && size%16 == 0 ->
(DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)

// Large zeroing uses REP STOSQ.
(Zero [size] destptr mem) && size > 1024 && size%8 == 0 ->
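The new rules split medium zeroing: a size that is a multiple of 8 but not of 16 first stores one 8-byte zero and recurses on the 16-byte-aligned remainder, which is then handled by DUFFZERO with an SSE zero (MOVOconst). A small sketch of just that size decomposition, for the 32..1024, multiple-of-8 sizes these rules cover (no SSA values involved):

package main

import "fmt"

// zeroPlan mirrors the rule structure: peel 8 bytes when size%16 != 0,
// then clear the rest in 16-byte steps via the duff device.
func zeroPlan(size int64) []string {
	var plan []string
	if size%16 != 0 {
		plan = append(plan, "MOVQstore: zero 8 bytes")
		size -= 8
	}
	plan = append(plan, fmt.Sprintf("DUFFZERO: zero %d bytes in 16-byte steps", size))
	return plan
}

func main() {
	fmt.Println(zeroPlan(40))
	fmt.Println(zeroPlan(64))
}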
@@ -388,10 +388,11 @@ func init() {
{
name: "DUFFZERO",
reg: regInfo{
inputs: []regMask{buildReg("DI"), buildReg("AX")},
inputs: []regMask{buildReg("DI"), buildReg("X0")},
clobbers: buildReg("DI FLAGS"),
},
},
{name: "MOVOconst", reg: regInfo{nil, 0, []regMask{fp}}, typ: "Float64"},

// arg0 = address of memory to zero
// arg1 = # of 8-byte words to zero
@@ -261,6 +261,7 @@ const (
OpAMD64MOVQstore
OpAMD64MOVQstoreidx8
OpAMD64DUFFZERO
OpAMD64MOVOconst
OpAMD64REPSTOSQ
OpAMD64CALLstatic
OpAMD64CALLclosure
@@ -3040,11 +3041,19 @@ var opcodeTable = [...]opInfo{
reg: regInfo{
inputs: []inputInfo{
{0, 128}, // .DI
{1, 1}, // .AX
{1, 65536}, // .X0
},
clobbers: 8589934720, // .DI .FLAGS
},
},
{
name: "MOVOconst",
reg: regInfo{
outputs: []regMask{
4294901760, // .X0 .X1 .X2 .X3 .X4 .X5 .X6 .X7 .X8 .X9 .X10 .X11 .X12 .X13 .X14 .X15
},
},
},
{
name: "REPSTOSQ",
reg: regInfo{
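The register masks in this generated table are bit sets; per the comments above, 65536 selects .X0 and 4294901760 selects .X0 through .X15. A quick arithmetic check of those two constants:

package main

import "fmt"

func main() {
	x0 := uint64(1) << 16          // bit 16 -> .X0 in this table's numbering
	x0to15 := uint64(0xffff) << 16 // bits 16..31 -> .X0 ... .X15
	fmt.Printf("%d %#x\n", x0, x0)         // 65536 0x10000
	fmt.Printf("%d %#x\n", x0to15, x0to15) // 4294901760 0xffff0000
}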
@@ -179,23 +179,21 @@ func f2i(f float64) int64 {
return int64(math.Float64bits(f))
}

// DUFFZERO consists of repeated blocks of 4 MOVs + ADD,
// with 4 STOSQs at the very end.
// The trailing STOSQs prevent the need for a DI preadjustment
// for small numbers of words to clear.
// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD,
// See runtime/mkduff.go.
const (
dzBlocks = 31 // number of MOV/ADD blocks
dzBlocks = 16 // number of MOV/ADD blocks
dzBlockLen = 4 // number of clears per block
dzBlockSize = 19 // size of instructions in a single block
dzMovSize = 4 // size of single MOV instruction w/ offset
dzAddSize = 4 // size of single ADD instruction
dzDIStep = 8 // number of bytes cleared by each MOV instruction
dzClearStep = 16 // number of bytes cleared by each MOV instruction

dzTailLen = 4 // number of final STOSQ instructions
dzTailSize = 2 // size of single STOSQ instruction

dzSize = dzBlocks*dzBlockSize + dzTailLen*dzTailSize // total size of DUFFZERO routine
dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
dzSize = dzBlocks * dzBlockSize
)

func duffStart(size int64) int64 {
@@ -210,20 +208,19 @@ func duffAdj(size int64) int64 {
// duff returns the offset (from duffzero, in bytes) and pointer adjust (in bytes)
// required to use the duffzero mechanism for a block of the given size.
func duff(size int64) (int64, int64) {
if size < 32 || size > 1024 || size%8 != 0 {
if size < 32 || size > 1024 || size%dzClearStep != 0 {
panic("bad duffzero size")
}
// TODO: arch-dependent
off := int64(dzSize)
off -= dzTailLen * dzTailSize
size -= dzTailLen * dzDIStep
q := size / dzDIStep
blocks, singles := q/dzBlockLen, q%dzBlockLen
off -= dzBlockSize * blocks
steps := size / dzClearStep
blocks := steps / dzBlockLen
steps %= dzBlockLen
off := dzBlockSize * (dzBlocks - blocks)
var adj int64
if singles > 0 {
off -= dzAddSize + dzMovSize*singles
adj -= dzDIStep * (dzBlockLen - singles)
if steps != 0 {
off -= dzAddSize
off -= dzMovSize * steps
adj -= dzClearStep * (dzBlockLen - steps)
}
return off, adj
}
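With the new constants (16 blocks of four 16-byte MOVUPS clears plus an ADD, 19 bytes of instructions per block, so dzSize = 304 and dzClearLen = 64), duff maps a size to an entry offset into the duffzero body plus a pointer pre-adjustment. A standalone copy of that arithmetic for experimenting with values; it mirrors the code above but is not the compiler package itself:

package main

import "fmt"

const (
	dzBlocks    = 16 // number of MOV/ADD blocks
	dzBlockLen  = 4  // number of clears per block
	dzBlockSize = 19 // bytes of instructions in a single block
	dzMovSize   = 4  // bytes of a single MOVUPS with offset
	dzAddSize   = 4  // bytes of a single ADD
	dzClearStep = 16 // bytes cleared by each MOVUPS

	dzClearLen = dzClearStep * dzBlockLen // 64 bytes cleared by one block
	dzSize     = dzBlocks * dzBlockSize   // 304 bytes of duffzero body
)

func duff(size int64) (off, adj int64) {
	if size < 32 || size > 1024 || size%dzClearStep != 0 {
		panic("bad duffzero size")
	}
	steps := size / dzClearStep
	blocks := steps / dzBlockLen
	steps %= dzBlockLen
	off = dzBlockSize * (dzBlocks - blocks)
	if steps != 0 {
		off -= dzAddSize
		off -= dzMovSize * steps
		adj -= dzClearStep * (dzBlockLen - steps)
	}
	return off, adj
}

func main() {
	for _, size := range []int64{64, 80, 256, 1024} {
		off, adj := duff(size)
		fmt.Printf("size=%d -> offset=%d adjust=%d\n", size, off, adj)
	}
}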
@@ -10635,14 +10635,48 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
end282b5e36693f06e2cd1ac563e0d419b5:
;
// match: (Zero [size] destptr mem)
// cond: size <= 1024 && size%8 == 0
// result: (DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVQconst [0]) mem)
// cond: size <= 1024 && size%8 == 0 && size%16 != 0
// result: (Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
{
size := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(size <= 1024 && size%8 == 0) {
goto endfae59ebc96f670276efea844c3b302ac
if !(size <= 1024 && size%8 == 0 && size%16 != 0) {
goto end240266449c3e493db1c3b38a78682ff0
}
v.Op = OpZero
v.AuxInt = 0
v.Aux = nil
v.resetArgs()
v.AuxInt = size - 8
v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, TypeInvalid)
v0.AuxInt = 8
v0.AddArg(destptr)
v0.Type = config.fe.TypeUInt64()
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeInvalid)
v1.AddArg(destptr)
v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, TypeInvalid)
v2.AuxInt = 0
v2.Type = config.fe.TypeUInt64()
v1.AddArg(v2)
v1.AddArg(mem)
v1.Type = TypeMem
v.AddArg(v1)
return true
}
goto end240266449c3e493db1c3b38a78682ff0
end240266449c3e493db1c3b38a78682ff0:
;
// match: (Zero [size] destptr mem)
// cond: size <= 1024 && size%16 == 0
// result: (DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)
{
size := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
if !(size <= 1024 && size%16 == 0) {
goto endf508bb887eee9119069b22c23dbca138
}
v.Op = OpAMD64DUFFZERO
v.AuxInt = 0

@@ -10654,15 +10688,15 @@ func rewriteValueAMD64(v *Value, config *Config) bool {
v0.AddArg(destptr)
v0.Type = config.fe.TypeUInt64()
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, TypeInvalid)
v1 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInvalid)
v1.AuxInt = 0
v1.Type = config.fe.TypeUInt64()
v1.Type = config.fe.TypeFloat64()
v.AddArg(v1)
v.AddArg(mem)
return true
}
goto endfae59ebc96f670276efea844c3b302ac
endfae59ebc96f670276efea844c3b302ac:
goto endf508bb887eee9119069b22c23dbca138
endf508bb887eee9119069b22c23dbca138:
;
// match: (Zero [size] destptr mem)
// cond: size > 1024 && size%8 == 0
116 src/cmd/dist/build.go
@@ -11,7 +11,9 @@ import (
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"sync"
)

// Initialization for any invocation.

@@ -487,9 +489,20 @@ var gentab = []struct {
{"anames9.c", nil},
}

// installed maps from a dir name (as given to install) to a chan
// closed when the dir's package is installed.
var installed = make(map[string]chan struct{})

// install installs the library, package, or binary associated with dir,
// which is relative to $GOROOT/src.
func install(dir string) {
if ch, ok := installed[dir]; ok {
defer close(ch)
}
for _, dep := range builddeps[dir] {
<-installed[dep]
}

if vflag > 0 {
if goos != gohostos || goarch != gohostarch {
errprintf("%s (%s/%s)\n", dir, goos, goarch)
@@ -498,6 +511,9 @@ func install(dir string) {
}
}

workdir := pathf("%s/%s", workdir, dir)
xmkdirall(workdir)

var clean []string
defer func() {
for _, name := range clean {
@@ -610,6 +626,8 @@ func install(dir string) {
pathf("%s/src/runtime/textflag.h", goroot), 0)
copyfile(pathf("%s/pkg/include/funcdata.h", goroot),
pathf("%s/src/runtime/funcdata.h", goroot), 0)
copyfile(pathf("%s/pkg/include/asm_ppc64x.h", goroot),
pathf("%s/src/runtime/asm_ppc64x.h", goroot), 0)
}

// Generate any missing files; regenerate existing ones.
@@ -673,6 +691,7 @@ func install(dir string) {
run(path, CheckExit|ShowOutput, compile...)

// Compile the files.
var wg sync.WaitGroup
for _, p := range files {
if !strings.HasSuffix(p, ".s") {
continue

@@ -695,14 +714,14 @@ func install(dir string) {
// Change the last character of the output file (which was c or s).
b = b[:len(b)-1] + "o"
compile = append(compile, "-o", b, p)
bgrun(path, compile...)
bgrun(&wg, path, compile...)

link = append(link, b)
if doclean {
clean = append(clean, b)
}
}
bgwait()
bgwait(&wg)

if ispackcmd {
xremove(link[targ])
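bgrun and bgwait now take an explicit *sync.WaitGroup, presumably so that each install call waits only for its own background assembler runs once installs execute concurrently. The pattern, reduced to a toy with a hypothetical work body:

package main

import (
	"fmt"
	"sync"
)

// bgrun-style: start work in the background, tied to the caller's WaitGroup.
func bgrun(wg *sync.WaitGroup, name string) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("compiled", name) // stand-in for running the assembler
	}()
}

func main() {
	var wg sync.WaitGroup
	for _, f := range []string{"a.s", "b.s", "c.s"} {
		bgrun(&wg, f)
	}
	wg.Wait() // the bgwait(&wg) step
}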
@@ -839,62 +858,19 @@ func dopack(dst, src string, extra []string) {
writefile(bdst.String(), dst, 0)
}

// buildorder records the order of builds for the 'go bootstrap' command.
// The Go packages and commands must be in dependency order,
// maintained by hand, but the order doesn't change often.
var buildorder = []string{
// Go libraries and programs for bootstrap.
"runtime",
"errors",
"sync/atomic",
"sync",
"internal/singleflight",
"io",
"unicode",
"unicode/utf8",
"unicode/utf16",
"bytes",
"math",
"strings",
"strconv",
"bufio",
"sort",
"container/heap",
"encoding/base64",
"syscall",
"internal/syscall/windows/registry",
"time",
"internal/syscall/windows",
"os",
"reflect",
"fmt",
"encoding",
"encoding/binary",
"encoding/json",
"flag",
"path/filepath",
"path",
"io/ioutil",
"log",
"regexp/syntax",
"regexp",
"go/token",
"go/scanner",
"go/ast",
"go/parser",
"os/exec",
"os/signal",
"net/url",
"text/template/parse",
"text/template",
"go/doc",
"go/build",
"hash",
"crypto",
"crypto/sha1",
"debug/dwarf",
"debug/elf",
"cmd/go",
// builddeps records the build dependencies for the 'go bootstrap' command.
// It is a map[string][]string and generated by mkdeps.bash into deps.go.

// buildlist is the list of directories being built, sorted by name.
var buildlist = makeBuildlist()

func makeBuildlist() []string {
var all []string
for dir := range builddeps {
all = append(all, dir)
}
sort.Strings(all)
return all
}

var runtimegen = []string{
@@ -903,7 +879,7 @@ var runtimegen = []string{
}

func clean() {
for _, name := range buildorder {
for _, name := range buildlist {
path := pathf("%s/src/%s", goroot, name)
// Remove generated files.
for _, elem := range xreaddir(path) {
@@ -1044,19 +1020,30 @@ func cmdbootstrap() {
// than in a standard release like Go 1.4, so don't do this rebuild by default.
if false {
xprintf("##### Building Go toolchain using itself.\n")
for _, dir := range buildorder {
if dir == "cmd/go" {
break
for _, dir := range buildlist {
installed[dir] = make(chan struct{})
}
var wg sync.WaitGroup
for _, dir := range builddeps["cmd/go"] {
wg.Add(1)
dir := dir
go func() {
defer wg.Done()
install(dir)
}()
}
wg.Wait()
xprintf("\n")
}

xprintf("##### Building go_bootstrap for host, %s/%s.\n", gohostos, gohostarch)
for _, dir := range buildorder {
install(dir)
for _, dir := range buildlist {
installed[dir] = make(chan struct{})
}
for _, dir := range buildlist {
go install(dir)
}
<-installed["cmd/go"]

goos = oldgoos
goarch = oldgoarch

@@ -1065,6 +1052,7 @@ func cmdbootstrap() {

// Build runtime for actual goos/goarch too.
if goos != gohostos || goarch != gohostarch {
installed["runtime"] = make(chan struct{})
install("runtime")
}
}
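cmdbootstrap now creates one channel per directory in buildlist, launches every install in its own goroutine, and each install blocks on the channels of its builddeps before starting and closes its own channel when done (see the install hunk above); waiting on installed["cmd/go"] then waits for the whole dependency graph. A self-contained miniature of the same pattern, with a made-up dependency map standing in for builddeps:

package main

import "fmt"

func main() {
	// Hypothetical dependency graph, standing in for builddeps.
	deps := map[string][]string{
		"runtime": nil,
		"fmt":     {"runtime"},
		"cmd/go":  {"fmt", "runtime"},
	}

	installed := make(map[string]chan struct{})
	for dir := range deps {
		installed[dir] = make(chan struct{})
	}

	install := func(dir string) {
		defer close(installed[dir]) // signal completion to dependents
		for _, dep := range deps[dir] {
			<-installed[dep] // block until every dependency is built
		}
		fmt.Println("built", dir)
	}

	for dir := range deps {
		go install(dir)
	}
	<-installed["cmd/go"] // cmd/go depends on everything else in this toy graph
}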
Some files were not shown because too many files have changed in this diff.